repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MCEvidence | MCEvidence-master/planck_mcevidence.py | '''
Planck MCMC chains evidence analysis. The data is available from [1].
Parameters
---------
Parallized version to compute evidence from Planck chains
We will analyze all schains in PLA folder
Returns
---------
The code writes results to terminal as well as a file. The default path
to the output files is
.. path:: planck_mce_fullGrid_R2/
Notes
---------
The full analysis using a single MPI process takes about ~30mins.
Examples
---------
To run the evidence estimation using 6 MPI processes
.. shell:: mpirun -np 6 python mce_pla.py
References
-----------
.. [1] Fullgrid Planck MCMC chains:
http://irsa.ipac.caltech.edu/data/Planck/release_2/ancillary-data/cosmoparams/COM_CosmoParams_fullGrid_R2.00.tar.gz
'''
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import glob
import numpy as np
import pandas as pd
from tabulate import tabulate
import pickle
import fileinput
from mpi4py import MPI
from argparse import ArgumentParser
import logging
#
from MCEvidence import MCEvidence
def h0_gauss_lnp(ParSamples, H0=73.24, H0_Err=1.74):
    """Half chi-square of the sampled H0 against a Gaussian H0 measurement.

    ``ParSamples`` must expose an ``H0`` attribute (array or scalar); the
    default mean/error correspond to the Riess et al. 2016 determination.
    The returned value is chi^2/2, i.e. the negative log-likelihood up to a
    constant (consumed with ``pos_lnp=False`` by MCEvidence).
    """
    deviation = ParSamples.H0 - H0
    return 0.5 * (deviation / H0_Err) ** 2.0
#---------------------------------------
#---- Extract command line arguments ---
#---------------------------------------
parser = ArgumentParser(description='Planck Chains MCEvidence.')
# Add options
parser.add_argument("-k", "--kmax",
dest="kmax",
default=2,
type=int,
help="scikit maximum K-NN ")
parser.add_argument("-nc", "--nchain",
dest="nchain",
default=0,
type=int,
help="How many chains to use (default=None - use all available) ")
parser.add_argument("-nd", "--ndata",
dest="ndata",
default=0,
type=int,
help="How many data cases to use (default=None - use all available) ")
parser.add_argument("-nm", "--nmodel",
dest="nmodel",
default=0,
type=int,
help="How many model cases to use (default=None - use all chains) ")
parser.add_argument("-b","--burnfrac", "--burnin","--remove",
dest="burnfrac",
default=0,
type=float,
help="Burn-in fraction")
parser.add_argument("-t","--thin", "--thinfrac",
dest="thinfrac",
default=0,
type=float,
help="Thinning fraction")
parser.add_argument("-o","--out", "--outdir",
dest="outdir",
default='planck_mce_fullGrid_R2_H0Reiss2016',
help="Output directory name")
parser.add_argument("--N","--name",
dest="name",
default='mce',
help="base name for output files")
parser.add_argument("-v", "--verbose",
dest="verbose",
default=1,
type=int,
help="increase output verbosity")
args = parser.parse_args()
#-----------------------------
#------ control parameters----
#-----------------------------
kmax=args.kmax
nchain=args.nchain
nmodel=args.nmodel
ndata=args.ndata
outdir=args.outdir
basename=args.name
burnfrac=args.burnfrac
thinfrac=args.thinfrac
verbose=args.verbose
#assert that kmax, the maximum kth nearest
#neighbour to use is >=2
assert kmax >= 2,'kmax must be >=2'
#---------------------------------------
#----- set basic logging
#---------------------------------------
if verbose==0:
logging.basicConfig(level=logging.WARNING)
elif verbose==1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.DEBUG)
#
logger = logging.getLogger(__name__)
#-----------------------------
#-------- Initialize MPI -----
#-----------------------------
def mpi_load_balance(MpiSize, nload):
    """Split *nload* work items as evenly as possible over *MpiSize* ranks.

    Returns an integer array ``nmpi_pp`` of length ``MpiSize`` such that
    ``nmpi_pp.sum() == nload`` and per-rank counts differ by at most one.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int.
    nmpi_pp = np.zeros(MpiSize, dtype=int)
    nmpi_pp[:] = nload // MpiSize
    r = nload % MpiSize
    # BUGFIX: the remainder r must be spread over exactly r ranks.  The
    # original slice nmpi_pp[1:r-1] covered only max(0, r-2) ranks, so the
    # per-rank counts summed to less than nload whenever r > 0 (work items
    # were silently dropped).
    if r != 0:
        nmpi_pp[:r] += 1
    return nmpi_pp
mpi_size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
comm = MPI.COMM_WORLD
#print all command line arguments passed
if rank==0:
print(args)
## If parallel MPI-IO is to be used
#amode=MPI.MODE_WRONLY
#fhandle = MPI.File.Open(comm, fout, amode)
#---------------------------------------------------
#------- Path and sub-directory folders ------------
#---------------------------------------------------
rootdir='COM_CosmoParams_fullGrid_R2.00'
#list of cosmology parameters
cosmo_params=['omegabh2','omegach2','theta','tau','omegak','mnu','meffsterile','w','wa',
'nnu','yhe','alpha1','deltazrei','Alens','Alensf','fdm','logA','ns','nrun',
'nrunrun','r','nt','ntrun','Aphiphi']
# Types of model to consider. Below a more
# comprehensive list is defined using wildcards.
# The function avail_data_list() extracts all data names
# available in the planck fullgrid directory.
DataSets=['plikHM_TT_lowTEB','plikHM_TT_lowTEB_post_BAO','plikHM_TT_lowTEB_post_lensing','plikHM_TT_lowTEB_post_H070p6','plikHM_TT_lowTEB_post_JLA','plikHM_TT_lowTEB_post_zre6p5','plikHM_TT_lowTEB_post_BAO_H070p6_JLA','plikHM_TT_lowTEB_post_lensing_BAO_H070p6_JLA','plikHM_TT_lowTEB_BAO','plikHM_TT_lowTEB_BAO_post_lensing','plikHM_TT_lowTEB_BAO_post_H070p6','plikHM_TT_lowTEB_BAO_post_H070p6_JLA','plikHM_TT_lowTEB_lensing','plikHM_TT_lowTEB_lensing_post_BAO','plikHM_TT_lowTEB_lensing_post_zre6p5','plikHM_TT_lowTEB_lensing_post_BAO_H070p6_JLA','plikHM_TT_tau07plikHM_TT_lowTEB_lensing_BAO','plikHM_TT_lowTEB_lensing_BAO_post_H070p6','plikHM_TT_lowTEB_lensing_BAO_post_H070p6_JLA','plikHM_TTTEEE_lowTEB','plikHM_TTTEEE_lowTEB_post_BAO','plikHM_TTTEEE_lowTEB_post_lensing','plikHM_TTTEEE_lowTEB_post_H070p6','plikHM_TTTEEE_lowTEB_post_JLA','plikHM_TTTEEE_lowTEB_post_zre6p5','plikHM_TTTEEE_lowTEB_post_BAO_H070p6_JLA','plikHM_TTTEEE_lowTEB_post_lensing_BAO_H070p6_JLA','plikHM_TTTEEE_lowl_lensing','plikHM_TTTEEE_lowl_lensing_post_BAO','plikHM_TTTEEE_lowl_lensing_post_BAO_H070p6_JLA','plikHM_TTTEEE_lowTEB_lensing']
# Types of model to consider. Below a more
# comprehensive list is defined using wildcards.
# The function avail_model_list() extracts all data names
# available in the planck fullgrid directory.
Models={}
Models['model']=['base','base_omegak','base_Alens','base_Alensf','base_nnu','base_mnu',\
'base_nrun','base_r','base_w','base_alpha1','base_Aphiphi','base_yhe',\
'base_mnu_Alens','base_mnu_omegak','base_mnu_w','base_nnu_mnu',\
'base_nnu_r','base_nrun_r','base_nnu_yhe','base_w_wa',\
'base_nnu_meffsterile','base_nnu_meffsterile_r']
#---------------------------------------
#-------- define some useful functions -
#---------------------------------------
def avail_data_list(mm):
    '''
    Given model name *mm*, extract all available data names.

    Scans the fullgrid tree for first-chain files of that model and
    returns ``(chain_roots, data_names)`` where each data name is the
    part of the path after ``base_``.
    '''
    chain_files = glob.glob('{0}/{1}/*/*_1.txt'.format(rootdir, mm))
    chain_roots = [path.split('_1')[0] for path in chain_files]
    data_names = [root.split('base_')[1] for root in chain_roots]
    return chain_roots, data_names
def avail_model_list(dd,nmax=0,sorter=Models['model']):
    '''
    Given data name *dd*, extract all available model paths and names.

    If *sorter* is not None (default: the canonical model list), the
    result is ordered according to the order of *sorter*; models absent
    from *sorter* sort last (as NaN categories).  When *nmax* > 0 only
    the first *nmax* rows are returned.

    Returns ``(paths, model_names)`` as numpy arrays.
    '''
    df = pd.DataFrame()
    l = glob.glob('{0}/*/*/*_{1}_1.txt'.format(rootdir, dd))
    df['l1'] = [x.split('_1')[0] for x in l]
    # model name is the first sub-directory under the fullgrid root
    df['l2'] = df['l1'].apply(lambda x: x.split('/')[1])
    # sort df based on sorter order
    if sorter:
        # BUGFIX/modernisation: cat.set_categories(..., inplace=True) was
        # deprecated and removed in pandas 2.0; assign the result instead.
        df['l2'] = df['l2'].astype('category').cat.set_categories(sorter)
        df = df.sort_values('l2')
    if nmax > 0:
        df = df.iloc[0:nmax]
    return df['l1'].values, df['l2'].values
def iscosmo_param(p,l=cosmo_params):
    '''
    Return True when parameter *p* is cosmological (i.e. appears in the
    name list *l*, defaulting to the module-level ``cosmo_params``);
    False means it is treated as a nuisance parameter.
    '''
    return any(p == candidate for candidate in l)
def params_info(fname):
    '''
    Read the CosmoMC ``<fname>.ranges`` file and summarise the sampled
    cosmological parameters.

    Returns a dict with keys ``name``, ``min``, ``max``, ``range``
    (per-parameter lists), plus ``str`` (comma-joined names), ``ndim``
    (number of varied cosmological parameters) and ``volume`` (product of
    the ranges, i.e. the prior-space volume).
    '''
    table = np.genfromtxt(fname + '.ranges', dtype=None, names=('name', 'min', 'max'))
    parMC = {'name': [], 'min': [], 'max': [], 'range': []}
    for p, pmin, pmax in zip(table['name'], table['min'], table['max']):
        # keep only varied (max != min) cosmological parameters
        if np.isclose(pmax, pmin) or not iscosmo_param(p):
            continue
        parMC['name'].append(p)
        parMC['min'].append(pmin)
        parMC['max'].append(pmax)
        parMC['range'].append(np.abs(pmax - pmin))
    #
    parMC['str'] = ','.join(parMC['name'])
    parMC['ndim'] = len(parMC['name'])
    parMC['volume'] = np.array(parMC['range']).prod()
    return parMC
#----------------------------------------------------------
#------- define which model, data etc. list to be used ----
#----------------------------------------------------------
if ndata>0:
data_list=DataSets[0:ndata]
else:
data_list=DataSets
model_list=Models['model']
logger.debug('len(data_list)={}, len(model_list)={}'.format(len(data_list),len(model_list)))
#------ mpi load balancing ---
main_loop_list=data_list
nload=len(main_loop_list)
lpp=mpi_load_balance(mpi_size,nload)
#--------------------------------------
#----Evidence calculation starts here -
#--------------------------------------
all_df={} #dictionary to store all results
if nchain == 0: #use all available chains
mce_cols=['AllChains']
chains_extension_list=['']
outdir='{}/AllChains/'.format(outdir)
outdir_data='{}/csv'.format(outdir)
else:
chains_extension_list=['_%s.txt'%x for x in range(1,nchain+1)]
mce_cols=['chain%s'%k for k in range(1,nchain+1)]
outdir='{}/SingleChains/'.format(outdir)
outdir_data='{}/csv'.format(outdir)
fout_txt='%s/%s_{}.txt'%(outdir,basename)
fout_df='%s/%s_{}.csv'%(outdir_data,basename)
if not os.path.exists(outdir_data):
os.makedirs(outdir_data)
# Column names for useful information outputs
mce_info_cols=['PriorVol','ndim','N_read','N_used']
for ipp in range(lpp[rank]): #loop over data
ig=ipp+lpp[0:rank-1].sum()
kk=main_loop_list[ig]
logger.debug('*** mpi_rank, idd, loop_key',rank, ig, kk)
kk_name='data'
idd=ig
dd=kk
dd_dir=dd.split('_post_')[0]
dd_name=dd #dd.split('plikHM_')[0]
path_list, name=avail_model_list(dd,nmax=nmodel)
mce=np.zeros((len(path_list),len(mce_cols)))
mce_info={ k:[] for k in mce_info_cols }
# prior volumes will be normalized by
# the volume of the base model
vol_norm=1.0
for imm,mm,fname in zip(range(len(name)),name, path_list): #model loop
if glob.glob(fname+'*.txt'):
parMC=params_info(fname)
if mm=='base': #base model
vol_norm=parMC['volume']
prior_volume=parMC['volume']/vol_norm
ndim=parMC['ndim']
#
mce_info['PriorVol'].append(prior_volume)
mce_info['ndim'].append(ndim)
#
logger.debug('***model: {}, ndim:{}, volume:{}, name={}'.format(mm,ndim,prior_volume,parMC['name']))
#
nc_read=''
nc_use=''
for icc, cext in enumerate(chains_extension_list):
fchain=fname+cext
e,info = MCEvidence(fchain,ndim=ndim,isfunc=h0_gauss_lnp,
priorvolume=prior_volume,
kmax=kmax,verbose=verbose,burnlen=burnfrac,
thinlen=thinfrac).evidence(info=True,pos_lnp=False)
mce[imm,icc]=e[0]
icc+=1
nc_read=nc_read+'%s,'%info['Nsamples_read']
nc_use=nc_use+'%s,'%info['Nsamples']
mce_info['N_read'].append(nc_read)
mce_info['N_used'].append(nc_use)
else:
print('*** not available: ',fname)
mce[imm,:]=np.nan
mce_info['N_read'].append('')
mce_info['N_used'].append('')
mce_info['PriorVol'].append(0)
mce_info['ndim'].append(0)
# At this stage evidence for a single data and all available
# models is computed. Put the array in pandas DataFrame
# and save it to a file
if not np.all(np.isnan(mce)):
df = pd.DataFrame(mce,index=name,columns=mce_cols)
df_mean=df.mean(axis=1)
if nchain>0:
df_std=df.std(axis=1)
df['Mean_lnE_k1'] =df_mean
df['Err_lnE_k1'] = df_std/np.sqrt(nchain*1.0)
df['delta_lnE_k1'] =df_mean-df_mean.max()
for k in mce_info_cols:
df[k]=mce_info[k]
#collect delta_lnE in a dictionary
all_df[dd] = df['delta_lnE_k1']
# print info
logging.info('--------------- {}---------'.format(kk))
if verbose>0:
print(tabulate(df, headers='keys', tablefmt='psql',floatfmt=".2f", numalign="left"))
#--------- outputs ----------
# first write to text file
fout=fout_txt.format(kk)
logging.info('rank={}, writing file to {}'.format(rank,fout))
fhandle=open(fout, 'w')
if rank==0:
fhandle.write('\n')
fhandle.write('############## RootDirectory={} ########\n'.format(rootdir))
fhandle.write('\n')
fhandle.write('\n')
fhandle.write('************ {} ************'.format(kk))
fhandle.write('\n')
fhandle.write(tabulate(df, headers='keys', tablefmt='psql',floatfmt=".2f", numalign="left"))
fhandle.write('\n')
fhandle.close()
# write dataframe to csv file
fout=fout_df.format(kk)
df.to_csv(fout)
#--------- big MPI loop finish here ----
#--------------------------------
# wait for all process to finish
#--------------------------------
comm.Barrier()
#----------------------------------------------------
#-- concatnate all output text files to a single file
#----------------------------------------------------
if rank==0:
fmain='{}/mce_planck_fullgrid.txt'.format(outdir)
fout_list=[fout_txt.format(kk) for kk in main_loop_list]
print('all outputs being written to ',fmain)
with open(fmain,'w') as outfile:
for fin in fout_list:
if os.path.exists(fin):
with open(fin) as inputfile:
for line in inputfile:
outfile.write(line)
#delete all single files
for fname in fout_list:
if os.path.exists(fname):
os.remove(fname)
#---------------------------------------
# gather all delta_lnE in one big array
#---------------------------------------
all_df = comm.gather(all_df, root=0)
if rank==0:
logger.debug('after gather type(delta_lnE_df)=',type(all_df))
all_df={ k: v for d in all_df for k, v in d.items() }
if verbose>1:
print('after_gather and concat: all_df.keys:',all_df.keys())
# Save a dictionary into a pickle file.
fout_pkl='{0}/delta_lnE_all_dict.pkl'.format(outdir_data)
logger.info('writting : %s '%fout_pkl)
pickle.dump(all_df, open(fout_pkl, "wb") )
#concat all
big_df=pd.DataFrame(index=model_list)
for dd,df in all_df.items():
big_df[dd]=df
#sort big_df based on DataSets order
df=big_df.T
s = pd.Series(df.index.values,dtype='category')
s.cat.set_categories(DataSets, inplace=True)
big_df=df.reindex(s.sort_values()).T
# Save a dictionary into a pickle file.
fout_pkl='{0}/delta_lnE_all_df.pkl'.format(outdir_data)
logger.info('writting : %s '%fout_pkl)
pickle.dump(big_df, open(fout_pkl, "wb") )
# #read
#big_df=pickle.load( open(fout_pkl, "rb" ) )
#
fout='{0}/delta_lnE_all.txt'.format(outdir)
#logger.info('writting : %s '%fout)
fhandle=open(fout, 'w')
fhandle.write('\n')
fhandle.write('############## RootDirectory={} ########\n'.format(rootdir))
fhandle.write('\n')
#print Long Column Names as header
newName=[]
for ik,k in enumerate(big_df.keys()):
nk='C%s'%ik
fhandle.write('# {}={} \n'.format(nk,k))
newName.append(nk)
big_df.columns=newName
fhandle.write(tabulate(big_df, headers='keys', tablefmt='psql',floatfmt=".2f", numalign="left"))
fhandle.write('\n')
fhandle.close()
#---------------------------------------------
| 16,750 | 33.467078 | 1,117 | py |
GXN | GXN-main/main.py | import sys
import os
import torch
import random
import datetime
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import math
from network import GXN
from mlp_dropout import MLPClassifier
from sklearn import metrics
from util import cmd_args, load_data, sep_data
sys.path.append('%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
class Classifier(nn.Module):
    """End-to-end graph classifier: a GXN feature extractor followed by an
    MLP classification head.  All hyper-parameters are read from the
    module-level ``cmd_args``.
    """
    def __init__(self):
        super(Classifier, self).__init__()
        model = GXN
        print("latent dim is ", cmd_args.latent_dim)
        # Node features fed to GXN are one-hot tags (feat_dim) concatenated
        # with continuous attributes (attr_dim); edge features are unused.
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                         output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim,
                         num_edge_feats=0,
                         k=cmd_args.sortpooling_k,
                         ks=[cmd_args.k1, cmd_args.k2],
                         cross_weight=cmd_args.cross_weight,
                         fuse_weight=cmd_args.fuse_weight,
                         R=cmd_args.Rhop)
        print("num_node_feats: ", cmd_args.feat_dim+cmd_args.attr_dim)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            # out_dim == 0 means "use GXN's flattened feature size directly".
            out_dim = self.s2v.dense_dim
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
    def PrepareFeatureLabel(self, batch_graph):
        """Concatenate node tags/attributes of a batch of graphs into one
        feature matrix and collect the per-graph labels.

        Returns ``(node_feat, labels)``: ``node_feat`` has one row per node
        of the whole batch (all-ones fallback when neither tags nor
        attributes exist); ``labels`` is a LongTensor of class indices.

        NOTE(review): the trailing ``.to(device)`` relies on the
        module-level ``device`` global set in ``__main__`` -- confirm
        before reusing this class elsewhere.
        """
        labels = torch.LongTensor(len(batch_graph))
        n_nodes = 0
        # Tag/feature availability is decided from the first graph only;
        # assumes the whole batch is homogeneous in that respect.
        if batch_graph[0].node_tags is not None:
            node_tag_flag = True
            concat_tag = []
        else:
            node_tag_flag = False
        if batch_graph[0].node_features is not None:
            node_feat_flag = True
            concat_feat = []
        else:
            node_feat_flag = False
        for i in range(len(batch_graph)):
            labels[i] = batch_graph[i].label
            n_nodes += batch_graph[i].num_nodes
            if node_tag_flag:
                concat_tag += batch_graph[i].node_tags
            if node_feat_flag:
                tmp = batch_graph[i].node_features.type('torch.FloatTensor')
                concat_feat.append(tmp)
        if node_tag_flag:
            # One-hot encode the integer node tags.
            concat_tag = torch.LongTensor(concat_tag).view(-1, 1)
            node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)
            node_tag.scatter_(1, concat_tag, 1)
        if node_feat_flag:
            node_feat = torch.cat(concat_feat, 0)
        if node_feat_flag and node_tag_flag:
            # Both available: [one-hot tags | continuous attributes].
            node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)
        elif node_feat_flag is False and node_tag_flag:
            node_feat = node_tag
        elif node_feat_flag and node_tag_flag is False:
            pass
        else:
            # Neither tags nor attributes: constant scalar feature per node.
            node_feat = torch.ones(n_nodes, 1)
        node_feat = node_feat.to(device)
        labels = labels.to(device)
        return node_feat, labels
    def forward(self, batch_graph, device=torch.device('cpu')):
        """Run the batch through GXN and the MLP head.

        Returns classification outputs (logits, loss, accuracy) plus the
        mutual-information logits ``ret_s1``/``ret_s2`` and their
        positive/negative target vectors ``milbl_s1``/``milbl_s2`` for the
        two pooling scales.
        """
        node_feat, labels = self.PrepareFeatureLabel(batch_graph)  # node_feat has shape [N, D] (DD: n*82)
        N, D = node_feat.shape
        labels = labels.to(device)
        embed, ret_s1, ret_s2 = self.s2v(batch_graph, node_feat, None)
        # MI targets: the first half of each logit vector corresponds to
        # positive pairs (label 1), the second half to negatives (label 0).
        lbl_t_s1 = torch.ones(N)
        lbl_f_s1 = torch.zeros(N)
        lbl_t_s2 = torch.ones(ret_s2.shape[0]//2)
        lbl_f_s2 = torch.zeros(ret_s2.shape[0]//2)
        milbl_s1 = torch.cat((lbl_t_s1, lbl_f_s1), 0).to(device)
        milbl_s2 = torch.cat((lbl_t_s2, lbl_f_s2), 0).to(device)
        logits, cls_loss, acc = self.mlp(embed, labels)
        return logits, cls_loss, acc, ret_s1, milbl_s1, ret_s2, milbl_s2
def loop_dataset(g_list, classifier, mi_loss, sample_idxes, epoch, optimizer=None,
                 bsize=cmd_args.batch_size, device=torch.device('cpu')):
    """Run one pass over the graphs selected by ``sample_idxes``.

    Trains when ``optimizer`` is given, otherwise evaluates.  ``mi_loss``
    is a pair of BCEWithLogitsLoss modules for the two mutual-information
    heads.  Returns an array ``[cls_loss, mi_loss, total_loss, acc, auc]``
    averaged over all processed samples.
    """
    total_loss = []
    # In eval mode (optimizer is None) round the batch count up so that a
    # final partial batch is still processed.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    pbar = tqdm(range(total_iters), unit='batch')
    all_targets = []
    all_scores = []
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in selected_idx]
        targets = [g_list[idx].label for idx in selected_idx]
        all_targets += targets
        logits, cls_loss, acc, ret_s1, milbl_s1, ret_s2, milbl_s2 = classifier(batch_graph, device)
        all_scores.append(logits[:, 1].detach())  # for binary classification
        miloss_s1 = mi_loss[0](ret_s1, milbl_s1)
        miloss_s2 = mi_loss[1](ret_s2, milbl_s2)
        miloss = (miloss_s1 + miloss_s2)/2
        # The MI regularisation weight anneals linearly from 2 towards 1
        # over the course of training.
        loss = cls_loss + miloss*(2-epoch/cmd_args.num_epochs)
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        cls_loss = cls_loss.data.cpu().numpy()
        miloss = miloss.data.cpu().numpy()
        loss = loss.data.cpu().numpy()
        pbar.set_description('cls_loss: %0.5f miloss: %0.5f loss: %0.5f acc: %0.5f' % (cls_loss, miloss, loss, acc))
        # Weight per-batch metrics by batch size for a proper sample average.
        total_loss.append(np.array([cls_loss, miloss, loss, acc]) * len(selected_idx))
        n_samples += len(selected_idx)
    # ------------------------------------------------------------------------------------------------------------------
    if optimizer is None:
        print(acc)  # accuracy of the last batch only
    # ------------------------------------------------------------------------------------------------------------------
    if optimizer is None:
        # In eval mode every sample must have been visited exactly once.
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    all_scores = torch.cat(all_scores).cpu().numpy()
    all_targets = np.array(all_targets)
    # ROC-AUC over the whole pass (meaningful for binary labels only).
    fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    avg_loss = np.concatenate((avg_loss, [auc]))
    return avg_loss
def count_parameters(model):
    """Print every trainable parameter tensor of *model* (with its shape
    when multi-dimensional) and return the total number of trainable
    scalar parameters."""
    total = 0
    for name, tensor in model.named_parameters():
        if not tensor.requires_grad:
            continue
        count = np.prod(tensor.size())
        if tensor.dim() > 1:
            shape_str = 'x'.join(str(d) for d in tensor.size())
            print(name, ':', shape_str, '=', count)
        else:
            print(name, ':', count)
        total += count
    return total
def set_randomseed(seed):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def model_run(cmd_args, g_list, device, foldidx, first_timstr):
    """Train/evaluate one cross-validation fold.

    Splits ``g_list`` with ``sep_data``, builds a ``Classifier``, trains it
    for ``cmd_args.num_epochs`` epochs (or only evaluates when
    ``cmd_args.weight`` points to a saved checkpoint), logs per-epoch
    metrics, checkpoints the best model, and returns the best test
    accuracy seen.
    """
    train_graphs, test_graphs = sep_data(cmd_args.data_root, g_list, foldidx)
    if cmd_args.sortpooling_k <= 1:
        # A fractional k is interpreted as a percentile of graph sizes.
        num_nodes_list = sorted([g.num_nodes for g in train_graphs + test_graphs])
        cmd_args.sortpooling_k = num_nodes_list[int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]
        cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)
        print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))
    classifier = Classifier().to(device)
    print("Number of Model Parameters: ", count_parameters(classifier))
    optimizer = optim.Adam(classifier.parameters(),
                           lr=cmd_args.learning_rate,
                           amsgrad=True,
                           weight_decay=0.001)
    train_idxes = list(range(len(train_graphs)))
    best_loss = None
    max_acc = 0.0
    # One BCE loss per mutual-information scale.
    mi_loss = [nn.BCEWithLogitsLoss(), nn.BCEWithLogitsLoss()]
    timstr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    logfile = './log_%s/log_%s/testlog_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, timstr)
    if not os.path.exists('./log_%s/log_%s' % (cmd_args.data, first_timstr)):
        os.makedirs('./log_%s/log_%s' % (cmd_args.data, first_timstr))
    if not os.path.exists('./result_%s/result_%s' % (cmd_args.data, first_timstr)):
        os.makedirs('./result_%s/result_%s' % (cmd_args.data, first_timstr))
    with open('./result_%s/result_%s/acc_result_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, first_timstr), 'a+') as f:
        f.write(str(cmd_args) + '\n')
    if not os.path.exists('./checkpoint_%s/time_%s/FOLD%s' % (cmd_args.data, first_timstr, foldidx)):
        os.makedirs('./checkpoint_%s/time_%s/FOLD%s' % (cmd_args.data, first_timstr, foldidx))
    if cmd_args.weight is not None:
        # Evaluation-only mode: load the checkpoint, score the test fold,
        # then abort deliberately (ValueError is used as an early exit).
        classifier.load_state_dict(torch.load(cmd_args.weight))
        classifier.eval()
        test_loss = loop_dataset(test_graphs, classifier, mi_loss, list(range(len(test_graphs))), epoch=0, device=device)
        with open(logfile, 'a+') as log:
            log.write('clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f'
                      % (test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]) + '\n')
        print('Best Acc:', test_loss[3])
        raise ValueError('Stop Testing')
    with open(logfile, 'a+') as log:
        log.write(str(cmd_args) + '\n')
        log.write('Fold index: ' + str(foldidx) + '\n')
    for epoch in range(cmd_args.num_epochs):
        random.shuffle(train_idxes)
        classifier.train()
        avg_loss = loop_dataset(train_graphs, classifier, mi_loss, train_idxes, epoch, optimizer=optimizer, device=device)
        avg_loss[4] = 0.0  # AUC is not tracked during training
        print('\033[92maverage training of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f\033[0m'
              % (epoch, avg_loss[0], avg_loss[1], avg_loss[2], avg_loss[3], avg_loss[4]))  # noqa
        classifier.eval()
        test_loss = loop_dataset(test_graphs, classifier, mi_loss, list(range(len(test_graphs))), epoch, device=device)
        test_loss[4] = 0.0
        print('\033[93maverage test of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f\033[0m'
              % (epoch, test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]))  # noqa
        with open(logfile, 'a+') as log:
            log.write('test of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f'
                      % (epoch, test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]) + '\n')
        if test_loss[3] > max_acc:
            # New best test accuracy: snapshot the model weights.
            max_acc = test_loss[3]
            fname = './checkpoint_%s/time_%s/FOLD%s/model_epoch%s.pt' % (cmd_args.data, first_timstr, foldidx, str(epoch))
            torch.save(classifier.state_dict(), fname)
    with open('./result_%s/result_%s/acc_result_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, first_timstr), 'a+') as f:
        f.write('\n')
        f.write('Fold index: ' + str(foldidx) + '\t')
        f.write(str(max_acc) + '\n')
    if cmd_args.extract_features:
        # NOTE(review): Classifier defines no output_features method in this
        # file -- confirm it exists before enabling -extract_features.
        features, labels = classifier.output_features(train_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_train.txt', torch.cat([labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f')
        features, labels = classifier.output_features(test_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_test.txt', torch.cat([labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f')
    return max_acc
if __name__ == '__main__':
    # Seed python/numpy/torch RNGs before anything else for reproducibility.
    set_randomseed(cmd_args.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Timestamp shared by all log/result/checkpoint directories of this run.
    first_timstr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    # Social-network style datasets lack node labels, so degrees become tags.
    if cmd_args.data in ['DD', 'PROTEINS']:
        g_list = load_data(cmd_args.data_root, degree_as_tag=False)
    elif cmd_args.data in ['COLLAB', 'IMDBBINARY', 'IMDBMULTI', 'ENZYMES']:
        g_list = load_data(cmd_args.data_root, degree_as_tag=True)
    else:
        raise ValueError('No such dataset')
    # print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
    print('# num of classes: ', cmd_args.num_class)
    print('Lets start a single-fold validation')
    print('start training ------> fold', cmd_args.fold)
    model_run(cmd_args, g_list, device, cmd_args.fold, first_timstr)
| 12,080 | 39.676768 | 136 | py |
GXN | GXN-main/network.py | from __future__ import print_function
import os
import ops
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from s2v_lib import S2VLIB # noqa
from pytorch_util import weights_init, gnn_spmm # noqa
class GXN(nn.Module):
def __init__(self, output_dim, num_node_feats, num_edge_feats,
latent_dim=[32, 32, 32, 1], k=30, ks=[0.9,0.7],
conv1d_channels=[16, 32],
conv1d_kws=[0, 5],
cross_weight=1.0, fuse_weight=1.0, R=1):
print('Initializing GXN')
super(GXN, self).__init__()
self.latent_dim = latent_dim
self.output_dim = output_dim
self.num_node_feats = num_node_feats
self.num_edge_feats = num_edge_feats
self.k = k
self.total_latent_dim = sum(latent_dim)
conv1d_kws[0] = self.total_latent_dim
self.conv_params = nn.ModuleList()
self.conv_params.append(nn.Linear(num_node_feats, latent_dim[0]))
for i in range(1, len(latent_dim)):
self.conv_params.append(nn.Linear(latent_dim[i-1], latent_dim[i]))
self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
self.maxpool1d = nn.MaxPool1d(2, 2)
self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
dense_dim = int((k-2)/2+1)
self.dense_dim = (dense_dim-conv1d_kws[1]+1)*conv1d_channels[1]
if num_edge_feats > 0:
self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
if output_dim > 0:
self.out_params = nn.Linear(self.dense_dim, output_dim)
self.ks = ks
self.gxn = ops.GraphCrossnet(ks, num_node_feats, 97, cross_weight=cross_weight, fuse_weight=fuse_weight, R=R)
weights_init(self)
    def forward(self, graph_list, node_feat, edge_feat):
        """Build the batched sparse graph operators and delegate to
        ``sortpooling_embedding``.

        ``graph_list`` is a list of S2VGraph-like objects; ``node_feat`` is
        the concatenated node-feature matrix of the whole batch;
        ``edge_feat`` may be None.
        """
        device = torch.device(node_feat.device)
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        # +1 accounts for the self-loop added to every node during loading.
        node_degs = [torch.Tensor(graph_list[i].degs)+1 for i in range(len(graph_list))]
        node_degs = torch.cat(node_degs).unsqueeze(1)
        # Sparse operators: node->node adjacency, edge->node incidence and
        # node->graph (subgraph) pooling matrices for the batch.
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        n2n_sp = n2n_sp.to(device)
        e2n_sp = e2n_sp.to(device)
        subg_sp = subg_sp.to(device)
        node_degs = node_degs.to(device)
        # Variable() is a no-op on modern PyTorch; kept for compatibility.
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        node_degs = Variable(node_degs)
        h = self.sortpooling_embedding(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp, graph_sizes, node_degs)
        return h
def sortpooling_embedding(self, node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp, graph_sizes, node_degs):
device = torch.device(node_feat.device)
''' if exists edge feature, concatenate to node feature vector '''
if edge_feat is not None:
input_edge_linear = self.w_e2l(edge_feat)
e2npool_input = gnn_spmm(e2n_sp, input_edge_linear)
node_feat = torch.cat([node_feat, e2npool_input], 1)
''' graph convolution layers '''
A = ops.spec_normalize_adj(n2n_sp).to(device)
ver = 2
if ver == 2:
cur_message_layer, ret_s1, ret_s2 = self.gxn(A, node_feat)
else:
lv = 0
cur_message_layer = node_feat
cat_message_layers = []
while lv < len(self.latent_dim):
n2npool = gnn_spmm(n2n_sp, cur_message_layer) + cur_message_layer # noqa
node_linear = self.conv_params[lv](n2npool) # Y = Y * W
normalized_linear = node_linear.div(node_degs) # Y = D^-1 * Y
cur_message_layer = F.tanh(normalized_linear)
cat_message_layers.append(cur_message_layer)
lv += 1
cur_message_layer = torch.cat(cat_message_layers, 1)
''' sortpooling layer '''
sort_channel = cur_message_layer[:, -1]
batch_sortpooling_graphs = torch.zeros(len(graph_sizes), self.k, self.total_latent_dim).to(device)
batch_sortpooling_graphs = Variable(batch_sortpooling_graphs)
accum_count = 0
for i in range(subg_sp.size()[0]):
to_sort = sort_channel[accum_count: accum_count + graph_sizes[i]]
k = self.k if self.k <= graph_sizes[i] else graph_sizes[i]
_, topk_indices = to_sort.topk(k)
topk_indices += accum_count
sortpooling_graph = cur_message_layer.index_select(0, topk_indices)
if k < self.k:
to_pad = torch.zeros(self.k-k, self.total_latent_dim).to(device)
to_pad = Variable(to_pad)
sortpooling_graph = torch.cat((sortpooling_graph, to_pad), 0)
batch_sortpooling_graphs[i] = sortpooling_graph
accum_count += graph_sizes[i]
''' traditional 1d convlution and dense layers '''
to_conv1d = batch_sortpooling_graphs.view((-1, 1, self.k * self.total_latent_dim))
conv1d_res = self.conv1d_params1(to_conv1d)
conv1d_res = F.relu(conv1d_res)
conv1d_res = self.maxpool1d(conv1d_res)
conv1d_res = self.conv1d_params2(conv1d_res)
conv1d_res = F.relu(conv1d_res)
to_dense = conv1d_res.view(len(graph_sizes), -1)
if self.output_dim > 0:
out_linear = self.out_params(to_dense)
reluact_fp = F.relu(out_linear)
else:
reluact_fp = to_dense
return F.relu(reluact_fp), ret_s1, ret_s2 | 5,853 | 39.652778 | 117 | py |
GXN | GXN-main/util.py | from __future__ import print_function
import random
import os
import numpy as np
import networkx as nx
import argparse
import torch
from sklearn.model_selection import StratifiedKFold
cmd_opt = argparse.ArgumentParser(description='Argparser for graph_classification')
cmd_opt.add_argument('-mode', default='cpu', help='cpu/gpu')
cmd_opt.add_argument('-data_root', default='any', help='The root dir of dataset')
cmd_opt.add_argument('-data', default=None, help='data folder name')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-feat_dim', type=int, default=0, help='dimension of discrete node feature (maximum node tag)')
cmd_opt.add_argument('-num_class', type=int, default=0, help='#classes')
cmd_opt.add_argument('-fold', type=int, default=1, help='fold (1..10)')
cmd_opt.add_argument('-test_number', type=int, default=0, help='if specified, will overwrite -fold and use the last -test_number graphs as testing data')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=str, default='64', help='dimension(s) of latent layers')
cmd_opt.add_argument('-k1', type=float, default=0.9, help='The scale proportion of scale 1')
cmd_opt.add_argument('-k2', type=float, default=0.7, help='The scale proportion of scale 2')
cmd_opt.add_argument('-sortpooling_k', type=float, default=30, help='number of nodes kept after SortPooling')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='s2v output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of regression')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
cmd_opt.add_argument('-dropout', type=bool, default=False, help='whether add dropout after dense layer')
cmd_opt.add_argument('-extract_features', type=bool, default=False, help='whether to extract final graph features')
cmd_opt.add_argument('-cross_weight', type=float, default=1.0, help='weights for hidden layer cross')
cmd_opt.add_argument('-fuse_weight', type=float, default=1.0, help='weights for final fuse')
cmd_opt.add_argument('-Rhop', type=int, default=1, help='neighborhood hop')
cmd_opt.add_argument('-weight', type=str, default=None, help='saved model parameters')
cmd_args, _ = cmd_opt.parse_known_args()
cmd_args.latent_dim = [int(x) for x in cmd_args.latent_dim.split('-')]
if len(cmd_args.latent_dim) == 1:
cmd_args.latent_dim = cmd_args.latent_dim[0]
class S2VGraph(object):
    """Lightweight graph container used by the batching code.

    Stores the underlying graph, its label, per-node tags/features, node
    degrees, and a flattened int32 array of edge endpoint pairs
    ``[u0, v0, u1, v1, ...]`` for fast consumption downstream.
    """
    def __init__(self, g, label, node_tags=None, node_features=None):
        self.g = g
        self.num_nodes = len(node_tags)
        self.node_tags = node_tags
        self.label = label
        self.node_features = node_features  # numpy array (node_num * feature_dim)
        self.degs = list(dict(g.degree()).values())
        edges = list(g.edges())
        if edges:
            self.num_edges = len(edges)
            pairs = np.empty((self.num_edges, 2), dtype=np.int32)
            for row, (u, v) in enumerate(edges):
                pairs[row, 0] = u
                pairs[row, 1] = v
            self.edge_pairs = pairs.flatten()
        else:
            self.num_edges = 0
            self.edge_pairs = np.array([])
def load_data(root_dir, degree_as_tag):
    """Parse <root_dir>/<data>/<data>.txt into a list of S2VGraph objects.

    File layout: first line is the number of graphs; each graph starts with a
    header "<n_nodes> <label>", followed by one line per node:
    "<tag> <n_neighbors> <neighbor ids...> [float attributes...]".
    Side effects: fills cmd_args.num_class / feat_dim / attr_dim.
    """
    print('loading data')
    g_list = []
    label_dict = {}   # raw graph label -> contiguous class index
    feat_dict = {}    # raw node tag -> contiguous tag index
    data_file = os.path.join(root_dir, '%s/%s.txt' % (cmd_args.data, cmd_args.data))
    with open(data_file, 'r') as f:
        n_g = int(f.readline().strip())
        row_list = []  # node counts per graph, for the summary print below
        for i in range(n_g):
            row = f.readline().strip().split()
            n, l = [int(w) for w in row]
            row_list.append(int(n))
            if not l in label_dict:
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                # tmp = index just past the neighbor list; anything after it
                # is interpreted as float node attributes.
                tmp = int(row[1]) + 2
                if tmp == len(row):
                    row = [int(w) for w in row]
                    attr = None
                else:
                    row, attr = [int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]])
                if not row[0] in feat_dict:
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                # NOTE(review): after the slicing above len(row) == tmp in both
                # branches, so this condition appears to be always False and
                # node attributes are never collected -- confirm against the
                # upstream DGCNN data loader.
                if tmp > len(row):
                    node_features.append(attr)
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
                # Every node also gets a self-loop.
                g.add_edge(j, j)
            if node_features != []:
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False
            assert len(g) == n
            g_list.append(S2VGraph(g, l, node_tags, node_features))
    print('max node num: ', np.max(row_list), 'min node num: ', np.min(row_list), 'mean node num: ', np.mean(row_list))
    for g in g_list:
        # Build adjacency lists and per-node degrees.
        g.neighbors = [[] for i in range(len(g.g))]
        for i, j in g.g.edges():
            g.neighbors[i].append(j)
            g.neighbors[j].append(i)
        degree_list = []
        for i in range(len(g.g)):
            g.neighbors[i] = g.neighbors[i]  # no-op (kept from original)
            degree_list.append(len(g.neighbors[i]))
        g.max_neighbor = max(degree_list)
        g.label = label_dict[g.label]  # remap raw label to 0..num_class-1
        # Duplicate each edge in both directions for the (2, E) edge matrix.
        edges = [list(pair) for pair in g.g.edges()]
        edges.extend([[i, j] for j, i in edges])
        deg_list = list(dict(g.g.degree(range(len(g.g)))).values())  # unused
        g.edge_mat = torch.LongTensor(edges).transpose(0,1)
    if degree_as_tag:
        # Replace tags with node degrees and one-hot encode them.
        for g in g_list:
            g.node_tags_ = list(dict(g.g.degree).values())
        tagset = set([])
        for g in g_list:
            tagset = tagset.union(set(g.node_tags_))
        tagset = list(tagset)
        tag2index = {tagset[i]:i for i in range(len(tagset))}
        for g in g_list:
            g.node_features = torch.zeros(len(g.node_tags_), len(tagset))
            g.node_features[range(len(g.node_tags_)), [tag2index[tag] for tag in g.node_tags_]] = 1
        node_feature_flag = True
    cmd_args.num_class = len(label_dict)
    cmd_args.feat_dim = len(feat_dict) # maximum node label (tag)
    # NOTE(review): `tagset` only exists when degree_as_tag was True; if
    # node_feature_flag is True without it this raises NameError -- verify.
    if node_feature_flag == True:
        cmd_args.attr_dim = len(tagset) # dim of node features (attributes)
    else:
        cmd_args.attr_dim = 0
    print('# classes: %d' % cmd_args.num_class)
    print("# data: %d" % len(g_list))
    return g_list
def sep_data(root_dir, graph_list, fold_idx, seed=0):
    """Split graph_list into (train, test) via the precomputed 10-fold index files.

    `seed` is accepted for interface compatibility and unused.
    """
    splits = []
    for prefix in ('train', 'test'):
        idx_file = os.path.join(
            root_dir, '%s/10fold_idx/%s_idx-%d.txt' % (cmd_args.data, prefix, fold_idx))
        indices = np.loadtxt(idx_file, dtype=np.int32).tolist()
        splits.append([graph_list[i] for i in indices])
    return splits[0], splits[1]
GXN | GXN-main/ops.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import scipy.sparse as sp
def spec_normalize_adj(adj, high_order=False):
    """Spectral (symmetric) normalization D^-1/2 A D^-1/2 of a torch adjacency.

    `adj` may be a sparse or dense torch tensor; a dense FloatTensor is
    returned. `high_order` is accepted for interface compatibility and unused.
    """
    mat = sp.coo_matrix(adj.to_dense().cpu().numpy())
    degree = np.asarray(mat.sum(1)).flatten()
    inv_sqrt = np.power(degree, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.  # isolated nodes (degree 0) get weight 0
    d_half = sp.diags(inv_sqrt)
    normalized = mat.dot(d_half).transpose().dot(d_half).tocoo()
    return torch.FloatTensor(normalized.todense())
def spac_normalize_adj(adj, high_order=False):
    """Spatial (random-walk style) normalization: returns (A · D^-1)^T densely.

    Columns of A are scaled by the inverse row-degree and the result is
    transposed. `high_order` is accepted for interface compatibility, unused.
    """
    mat = sp.coo_matrix(adj.to_dense().cpu().numpy())
    degree = np.asarray(mat.sum(1)).flatten()
    inv_deg = np.power(degree, -1.)
    inv_deg[np.isinf(inv_deg)] = 0.  # guard isolated nodes
    normalized = mat.dot(sp.diags(inv_deg)).transpose().tocoo()
    return torch.FloatTensor(normalized.todense())
def normalize_adj_torch(mx):
    """Symmetric normalization in pure torch: D^-1/2 · mx^T · D^-1/2.

    Degrees are row sums of `mx`; zero-degree rows yield zeros rather than
    inf. Accepts a sparse or dense tensor and returns a dense one.
    """
    dense = mx.to_dense()
    deg = dense.sum(1)
    inv_sqrt = torch.pow(deg, -0.5).flatten()
    inv_sqrt[torch.isinf(inv_sqrt)] = 0.
    d_half = torch.diag(inv_sqrt)
    out = torch.matmul(dense, d_half)
    out = torch.matmul(torch.transpose(out, 0, 1), d_half)
    return out
class MLP(nn.Module):
    """Single fully-connected layer with PReLU (or a caller-supplied) activation.

    Note: with bias=True there are two bias terms -- the nn.Linear bias plus an
    extra zero-initialized parameter added on top (mirrors the original code).
    """
    def __init__(self, in_ft, out_ft, act='prelu', bias=True):
        super().__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=bias)
        self.act = nn.PReLU() if act == 'prelu' else act
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-uniform weights and zero bias for every Linear submodule.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, x):
        out = self.fc(x)
        if self.bias is not None:
            out = out + self.bias
        return self.act(out)
class GCN_MI(nn.Module):
    """Graph convolution for the mutual-information branch: act(A · XW + b)."""
    def __init__(self, in_ft, out_ft, act='prelu', bias=True):
        super().__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == 'prelu' else act
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, A, x, sparse=False):
        feats = self.fc(x)
        if sparse:
            # torch.spmm needs 2-D operands; re-add the batch dim afterwards.
            agg = torch.unsqueeze(torch.spmm(A, torch.squeeze(feats, 0)), 0)
        else:
            agg = torch.bmm(A.unsqueeze(0), feats.unsqueeze(0))
        if self.bias is not None:
            agg = agg + self.bias
        return self.act(agg).squeeze(0)
class GCN(nn.Module):
    """Plain graph convolution: proj(A · dropout(X)), with optional activation."""
    def __init__(self, in_dim, out_dim):
        super(GCN, self).__init__()
        self.proj = nn.Linear(in_dim, out_dim)
        self.drop = nn.Dropout(p=0.3)

    def forward(self, A, X, act=None):
        h = torch.matmul(A, self.drop(X))
        h = self.proj(h)
        return h if act is None else act(h)
class Discriminator(nn.Module):
    """Bilinear discriminator scoring (embedding, summary) pairs for the MI loss."""
    def __init__(self, n_h):
        super().__init__()
        self.f_k = nn.Bilinear(n_h, n_h, 1)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        # Scores for the positive (h_pl) and negative/shuffled (h_mi) samples.
        pos = torch.squeeze(self.f_k(h_pl, c), -2)
        neg = torch.squeeze(self.f_k(h_mi, c), -2)
        if s_bias1 is not None:
            pos = pos + s_bias1
        if s_bias2 is not None:
            neg = neg + s_bias2
        logits = torch.cat((pos, neg), 0).squeeze(-1)
        # Return all logits plus the positive-sample half.
        half = logits.shape[0] // 2
        return logits, logits[:half]
class GraphCrossnet(nn.Module):
    """Three-scale Graph Cross Network backbone.

    Maintains node features at three resolutions (s1 full graph, s2/s3
    progressively pooled by ratios ks[0]/ks[1]) and exchanges information
    between adjacent scales after each GCN layer ("cross" connections).

    Args:
        ks: pooling keep-ratios (ks[0] for s1->s2, ks[1] for s2->s3).
        in_dim/out_dim: input / output feature dimensions.
        dim: hidden width shared by all internal GCN layers.
        cross_weight: weight of the cross-scale feature exchange.
        fuse_weight: weight of the final coarse-to-fine fusion.
        R: neighborhood hop passed through to IndexSelect.
        cross_layer: how many of the 3 GCN stages get cross connections (0-2).

    forward(A, x) returns (node features, MI logits of scale 1, MI logits of
    scale 2); the logits feed the auxiliary mutual-information loss.
    """
    def __init__(self, ks, in_dim, out_dim, dim=48, cross_weight=1.0, fuse_weight=1.0, R=1, cross_layer=2):
        super(GraphCrossnet, self).__init__()
        self.ks = ks
        self.cs_w = cross_weight
        self.fs_w = fuse_weight
        self.cs_l = cross_layer
        # Entry / exit projections.
        self.start_gcn_s1 = GCN(in_dim, dim)
        self.start_gcn_s2 = GCN(dim, dim)
        self.end_gcn = GCN(2*dim, out_dim)
        # Learned node scoring/selection for each pooling step.
        self.index_select_s1 = IndexSelect(ks[0], dim, act='prelu', R=R)
        self.index_select_s2 = IndexSelect(ks[1], dim, act='prelu', R=R)
        self.pool_s12_start = GraphPool(dim)
        self.pool_s23_start = GraphPool(dim)
        self.unpool_s21_end = GraphUnpool(dim)
        self.unpool_s32_end = GraphUnpool(dim)
        # Three GCN layers per scale.
        self.s1_l1 = GCN(dim, dim)
        self.s1_l2 = GCN(dim, dim)
        self.s1_l3 = GCN(dim, dim)
        self.s2_l1 = GCN(dim, dim)
        self.s2_l2 = GCN(dim, dim)
        self.s2_l3 = GCN(dim, dim)
        self.s3_l1 = GCN(dim, dim)
        self.s3_l2 = GCN(dim, dim)
        self.s3_l3 = GCN(dim, dim)
        # Cross-scale pool/unpool pairs, one set per cross layer.
        if self.cs_l>=1:
            self.pool_s12_1 = GraphPool(dim, g=True)
            self.unpool_s21_1 = GraphUnpool(dim)
            self.pool_s23_1 = GraphPool(dim, g=True)
            self.unpool_s32_1 = GraphUnpool(dim)
        if self.cs_l>=2:
            self.pool_s12_2 = GraphPool(dim, g=True)
            self.unpool_s21_2 = GraphUnpool(dim)
            self.pool_s23_2 = GraphPool(dim, g=True)
            self.unpool_s32_2 = GraphUnpool(dim)
    def forward(self, A, x):
        # --- build the three scales ---
        A_s1 = A
        x_s1 = self.start_gcn_s1(A_s1, x)
        x_org = x_s1
        # Row-shuffled features serve as negative samples for the MI loss.
        x_s1_ = torch.zeros_like(x_s1)
        x_s1_ = x_s1[torch.randperm(x_s1.shape[0]),:]
        ret_s1, value_s1, idx_s1, idx_s1_, Xdown_s1 = self.index_select_s1(x_s1, x_s1_, A_s1)
        x_s2, A_s2 = self.pool_s12_start(A_s1, x_s1, idx_s1, idx_s1_, value_s1, initlayer=True)
        x_s2 = self.start_gcn_s2(A_s2, x_s2)
        x_s2_ = torch.zeros_like(x_s2)
        x_s2_ = x_s2[torch.randperm(x_s2.shape[0]),:]
        ret_s2, value_s2, idx_s2, idx_s2_, Xdown_s2 = self.index_select_s2(x_s2, x_s2_, A_s2)
        x_s3, A_s3 = self.pool_s23_start(A_s2, x_s2, idx_s2, idx_s2_, value_s2, initlayer=True)
        # --- stage 1 GCNs (with residual inputs saved) ---
        res_s1_0, res_s2_0, res_s3_0 = x_s1, x_s2, x_s3
        x_s1 = self.s1_l1(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l1(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l1(A_s3, x_s3, F.relu)
        res_s1_1, res_s2_1, res_s3_1 = x_s1, x_s2, x_s3
        # First cross-scale exchange + residual connections.
        if self.cs_l >= 1:
            x_s12_fu = self.pool_s12_1(A_s1, x_s1, idx_s1, idx_s1_, value_s1)
            x_s21_fu = self.unpool_s21_1(A_s1, x_s2, idx_s1)
            x_s23_fu = self.pool_s23_1(A_s2, x_s2, idx_s2, idx_s2_, value_s2)
            x_s32_fu = self.unpool_s32_1(A_s2, x_s3, idx_s2)
            x_s1 = x_s1 + self.cs_w * x_s21_fu + res_s1_0
            x_s2 = x_s2 + self.cs_w * (x_s12_fu + x_s32_fu)/2 + res_s2_0
            x_s3 = x_s3 + self.cs_w * x_s23_fu + res_s3_0
        # --- stage 2 GCNs ---
        x_s1 = self.s1_l2(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l2(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l2(A_s3, x_s3, F.relu)
        # Second (down-weighted) cross-scale exchange.
        if self.cs_l >= 2:
            x_s12_fu = self.pool_s12_2(A_s1, x_s1, idx_s1, idx_s1_, value_s1)
            x_s21_fu = self.unpool_s21_2(A_s1, x_s2, idx_s1)
            x_s23_fu = self.pool_s23_2(A_s2, x_s2, idx_s2, idx_s2_, value_s2)
            x_s32_fu = self.unpool_s32_2(A_s2, x_s3, idx_s2)
            x_s1 = x_s1 + self.cs_w * 0.05 * x_s21_fu
            x_s2 = x_s2 + self.cs_w * 0.05 * (x_s12_fu + x_s32_fu)/2
            x_s3 = x_s3 + self.cs_w * 0.05 * x_s23_fu
        # --- stage 3 GCNs ---
        x_s1 = self.s1_l3(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l3(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l3(A_s3, x_s3, F.relu)
        # Fuse coarse scales back up to full resolution and project out.
        x_s3_out = self.unpool_s32_end(A_s2, x_s3, idx_s2) + Xdown_s2
        x_s2_out = self.unpool_s21_end(A_s1, x_s2 + x_s3_out, idx_s1)
        x_agg = x_s1 + x_s2_out * self.fs_w + Xdown_s1 * self.fs_w
        x_agg = torch.cat([x_agg, x_org], 1)
        x_agg = self.end_gcn(A_s1, x_agg)
        return x_agg, ret_s1, ret_s2
class IndexSelect(nn.Module):
    """DGI-style node scorer that selects the top k-fraction of nodes.

    seq1 are the real node features, seq2 shuffled (negative) ones; the
    discriminator output feeds the mutual-information loss while its positive
    half, squashed through a sigmoid, ranks the nodes for pooling.
    """
    def __init__(self, k, n_h, act, R=1):
        super().__init__()
        self.k = k          # fraction of nodes to keep (0..1)
        self.R = R          # neighborhood hop (kept for interface; not used here)
        self.sigm = nn.Sigmoid()
        self.fc = MLP(n_h, n_h, act)
        self.disc = Discriminator(n_h)
        self.gcn1 = GCN(n_h, n_h)
    def forward(self, seq1, seq2, A, samp_bias1=None, samp_bias2=None):
        """Return (MI logits, kept scores, kept idx, dropped idx, propagated feats)."""
        h_1 = self.fc(seq1)
        h_2 = self.fc(seq2)
        # One propagation step produces the per-node "summary" for the
        # discriminator and the features handed to the pooling layer.
        h_n1 = self.gcn1(A, h_1)
        X = self.sigm(h_n1)
        ret, ret_true = self.disc(X, h_1, h_2, samp_bias1, samp_bias2)
        scores = self.sigm(ret_true).squeeze()
        num_nodes = A.shape[0]
        # topk over all nodes yields a full ranking; split at k*num_nodes.
        values, idx = torch.topk(scores, int(num_nodes))
        values1, idx1 = values[:int(self.k*num_nodes)], idx[:int(self.k*num_nodes)]
        values0, idx0 = values[int(self.k*num_nodes):], idx[int(self.k*num_nodes):]
        return ret, values1, idx1, idx0, h_n1
class GraphPool(nn.Module):
    """Top-k pooling: keep the rows of X given by `idx`, rescaled by `value`.

    With g=True the features are first smoothed by an extra GCN layer.
    When initlayer=True the adjacency is also restricted to the kept nodes
    and returned alongside the pooled features.
    """
    def __init__(self, in_dim, g=False):
        super(GraphPool, self).__init__()
        self.g = g
        if g:
            self.down_gcn = GCN(in_dim, in_dim)

    def forward(self, A, X, idx, idx_=None, value=None, initlayer=False):
        feats = self.down_gcn(A, X) if self.g else X
        # Gate each kept node's features by its selection score.
        pooled = feats[idx, :] * torch.unsqueeze(value, -1)
        if not initlayer:
            return pooled
        return pooled, self.removeedge(A, idx)

    def removeedge(self, A, idx):
        # Keep only the rows/columns of the selected nodes.
        return A[idx, :][:, idx]
class GraphUnpool(nn.Module):
    """Scatter pooled features back onto the full node set, then GCN-smooth."""
    def __init__(self, in_dim):
        super(GraphUnpool, self).__init__()
        self.up_gcn = GCN(in_dim, in_dim)

    def forward(self, A, X, idx):
        # Rows not listed in idx stay zero before the smoothing step.
        full = torch.zeros([A.shape[0], X.shape[1]]).to(X.device)
        full[idx] = X
        return self.up_gcn(A, full)
GXN | GXN-main/mlp_dropout.py | from __future__ import print_function
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class MLPRegression(nn.Module):
    """Two-layer perceptron head producing a single regression output.

    When targets `y` are given, also returns (pred, mae, mse).
    """
    def __init__(self, input_size, hidden_size):
        super(MLPRegression, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, 1)

    def forward(self, x, y=None):
        hidden = F.relu(self.h1_weights(x))
        pred = self.h2_weights(hidden)
        if y is None:
            return pred
        y = Variable(y)
        # Report both squared and absolute error against the targets.
        mse = F.mse_loss(pred, y)
        mae = F.l1_loss(pred, y)
        return pred, mae, mse
class MLPClassifier(nn.Module):
    """Two-layer perceptron head returning log-softmax class scores.

    When targets `y` are given, also returns the NLL loss and the accuracy.
    """
    def __init__(self, input_size, hidden_size, num_class, with_dropout=False):
        super(MLPClassifier, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        self.with_dropout = with_dropout

    def forward(self, x, y=None):
        hidden = F.relu(self.h1_weights(x))
        if self.with_dropout:
            hidden = F.dropout(hidden, training=self.training)
        logits = F.log_softmax(self.h2_weights(hidden), dim=1)
        if y is None:
            return logits
        loss = F.nll_loss(logits, y)
        pred = logits.data.max(1, keepdim=True)[1]
        correct = pred.eq(y.data.view_as(pred)).cpu().sum().item()
        acc = correct / float(y.size()[0])
        return logits, loss, acc
| 1,575 | 28.735849 | 87 | py |
GXN | GXN-main/lib/pytorch_util.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from gnn_lib import GNNLIB
def glorot_uniform(t):
if len(t.size()) == 2:
fan_in, fan_out = t.size()
elif len(t.size()) == 3:
# out_ch, in_ch, kernel for Conv 1
fan_in = t.size()[1] * t.size()[2]
fan_out = t.size()[0] * t.size()[2]
else:
fan_in = np.prod(t.size())
fan_out = np.prod(t.size())
limit = np.sqrt(6.0 / (fan_in + fan_out))
t.uniform_(-limit, limit)
def _param_init(m):
if isinstance(m, Parameter):
glorot_uniform(m.data)
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
glorot_uniform(m.weight.data)
def weights_init(m):
for p in m.modules():
if isinstance(p, nn.ParameterList):
for pp in p:
_param_init(pp)
else:
_param_init(p)
for name, p in m.named_parameters():
if not '.' in name: # top-level parameters
_param_init(p)
class MySpMM(torch.autograd.Function):
    """Autograd-aware sparse @ dense matrix product.

    Gradients flow only to the dense operand; the sparse matrix is treated as
    a constant (asserted in backward).
    """

    @staticmethod
    def forward(ctx, sp_mat, dense_mat):
        ctx.save_for_backward(sp_mat, dense_mat)
        return torch.mm(sp_mat, dense_mat)

    @staticmethod
    def backward(ctx, grad_output):
        # `saved_tensors` replaces the long-deprecated `saved_variables`
        # attribute, which no longer exists in current PyTorch.
        sp_mat, dense_mat = ctx.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        # Gradient w.r.t. the sparse matrix is not implemented.
        assert not ctx.needs_input_grad[0]
        if ctx.needs_input_grad[1]:
            # d(sp @ dense)/d(dense) pulled back: sp^T @ grad_output
            grad_matrix2 = torch.mm(sp_mat.t(), grad_output)
        return grad_matrix1, grad_matrix2

def gnn_spmm(sp_mat, dense_mat):
    """Sparse-dense matmul with gradient support for the dense argument."""
    return MySpMM.apply(sp_mat, dense_mat)
| 1,850 | 25.070423 | 80 | py |
GXN | GXN-main/lib/gnn_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
import pdb
class _gnn_lib(object):
    """ctypes wrapper around the compiled libgnn.so batch-graph library.

    Builds sparse node-to-node / edge-to-node / subgraph-pooling matrices for
    a minibatch of graphs by calling into C++ code that fills pre-allocated
    torch tensor buffers in place.
    """
    def __init__(self, args):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libgnn.so' % dir_path)
        # Declare C return types so ctypes does not truncate pointers.
        self.lib.GetGraphStruct.restype = ctypes.c_void_p
        self.lib.PrepareBatchGraph.restype = ctypes.c_int
        self.lib.PrepareSparseMatrices.restype = ctypes.c_int
        self.lib.NumEdgePairs.restype = ctypes.c_int
        if sys.version_info[0] > 2:
            args = [arg.encode() for arg in args]  # str -> bytes for each element in args
        arr = (ctypes.c_char_p * len(args))()
        arr[:] = args
        # Forward argv so the C side can read its own flags.
        self.lib.Init(len(args), arr)
        # Opaque handle to the C-side batch-graph struct, reused per batch.
        self.batch_graph_handle = ctypes.c_void_p(self.lib.GetGraphStruct())
    def _prepare_graph(self, graph_list, is_directed=0):
        """Upload a batch of graphs to the C side; return total node/edge counts."""
        edgepair_list = (ctypes.c_void_p * len(graph_list))()
        list_num_nodes = np.zeros((len(graph_list), ), dtype=np.int32)
        list_num_edges = np.zeros((len(graph_list), ), dtype=np.int32)
        for i in range(len(graph_list)):
            # edge_pairs may already be a raw pointer or a flattened int32 array.
            if type(graph_list[i].edge_pairs) is ctypes.c_void_p:
                edgepair_list[i] = graph_list[i].edge_pairs
            elif type(graph_list[i].edge_pairs) is np.ndarray:
                edgepair_list[i] = ctypes.c_void_p(graph_list[i].edge_pairs.ctypes.data)
            else:
                raise NotImplementedError
            list_num_nodes[i] = graph_list[i].num_nodes
            list_num_edges[i] = graph_list[i].num_edges
        total_num_nodes = np.sum(list_num_nodes)
        total_num_edges = np.sum(list_num_edges)
        self.lib.PrepareBatchGraph(self.batch_graph_handle,
                                   len(graph_list),
                                   ctypes.c_void_p(list_num_nodes.ctypes.data),
                                   ctypes.c_void_p(list_num_edges.ctypes.data),
                                   ctypes.cast(edgepair_list, ctypes.c_void_p),
                                   is_directed)
        return total_num_nodes, total_num_edges
    def PrepareSparseMatrices(self, graph_list, is_directed=0):
        """Return (n2n, e2n, subgraph) sparse matrices for the batch.

        The index/value buffers are allocated here and filled in place by the
        C library; each undirected edge is counted in both directions.
        """
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        n2n_idxes = torch.LongTensor(2,  total_num_edges * 2)
        n2n_vals = torch.FloatTensor(total_num_edges * 2)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 3)()
        idx_list[0] = n2n_idxes.numpy().ctypes.data
        idx_list[1] = e2n_idxes.numpy().ctypes.data
        idx_list[2] = subg_idxes.numpy().ctypes.data
        val_list = (ctypes.c_void_p * 3)()
        val_list[0] = n2n_vals.numpy().ctypes.data
        val_list[1] = e2n_vals.numpy().ctypes.data
        val_list[2] = subg_vals.numpy().ctypes.data
        self.lib.PrepareSparseMatrices(self.batch_graph_handle,
                                       ctypes.cast(idx_list, ctypes.c_void_p),
                                       ctypes.cast(val_list, ctypes.c_void_p))
        n2n_sp = torch.sparse.FloatTensor(n2n_idxes, n2n_vals, torch.Size([total_num_nodes, total_num_nodes]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2n_sp, e2n_sp, subg_sp
# Instantiate the ctypes wrapper only if the compiled shared library exists;
# otherwise expose None so importing this module never fails.
dll_path = '%s/build/dll/libgnn.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
    GNNLIB = _gnn_lib(sys.argv)
else:
    GNNLIB = None
| 3,813 | 40.912088 | 114 | py |
GXN | GXN-main/pytorch_structure2vec-master/graph_classification/main.py | import sys
import os
import torch
import random
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append('%s/../s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from embedding import EmbedMeanField, EmbedLoopyBP
from mlp import MLPClassifier
from util import cmd_args, load_data
class Classifier(nn.Module):
    """End-to-end graph classifier: structure2vec embedding + MLP head.

    All hyperparameters come from the module-level cmd_args.
    """
    def __init__(self):
        super(Classifier, self).__init__()
        # Choose the embedding model from the command-line flag.
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                        output_dim=cmd_args.out_dim,
                        num_node_feats=cmd_args.feat_dim,
                        num_edge_feats=0,
                        max_lv=cmd_args.max_lv)
        out_dim = cmd_args.out_dim
        # out_dim == 0 means the embedding is emitted at latent_dim width.
        if out_dim == 0:
            out_dim = cmd_args.latent_dim
        self.mlp = MLPClassifier(input_size=out_dim, hidden_size=cmd_args.hidden, num_class=cmd_args.num_class)
    def PrepareFeatureLabel(self, batch_graph):
        """Build one-hot node-tag features and a label tensor for a batch."""
        labels = torch.LongTensor(len(batch_graph))
        n_nodes = 0
        concat_feat = []
        for i in range(len(batch_graph)):
            labels[i] = batch_graph[i].label
            n_nodes += batch_graph[i].num_nodes
            concat_feat += batch_graph[i].node_tags
        concat_feat = torch.LongTensor(concat_feat).view(-1, 1)
        # One-hot encode the integer node tags.
        node_feat = torch.zeros(n_nodes, cmd_args.feat_dim)
        node_feat.scatter_(1, concat_feat, 1)
        if cmd_args.mode == 'gpu':
            node_feat = node_feat.cuda()
            labels = labels.cuda()
        return node_feat, labels
    def forward(self, batch_graph):
        node_feat, labels = self.PrepareFeatureLabel(batch_graph)
        embed = self.s2v(batch_graph, node_feat, None)
        # Labels are passed, so MLPClassifier returns (logits, loss, acc).
        return self.mlp(embed, labels)
def loop_dataset(g_list, classifier, sample_idxes, optimizer=None, bsize=cmd_args.batch_size):
    """Run one pass over `sample_idxes` in minibatches; train when an optimizer is given.

    Returns np.array([mean_loss, mean_accuracy]) weighted by batch sizes.
    Note: `bsize` defaults to cmd_args.batch_size captured at import time.
    """
    total_loss = []
    # During evaluation (no optimizer) round up so a trailing partial batch
    # is included; during training the remainder is dropped.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    pbar = tqdm(range(total_iters), unit='batch')
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize : (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in selected_idx]
        _, loss, acc = classifier(batch_graph)
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # .item() works for both 0-dim tensors (modern PyTorch losses) and
        # 1-element tensors; the previous `.numpy()[0]` raised an IndexError
        # on 0-dim loss tensors.
        loss = loss.data.cpu().item()
        pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc) )
        # Weight per-batch stats by batch size for a correct overall mean.
        total_loss.append( np.array([loss, acc]) * len(selected_idx))
        n_samples += len(selected_idx)
    if optimizer is None:
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    return avg_loss
if __name__ == '__main__':
    # Seed all three RNGs for reproducible runs.
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)
    train_graphs, test_graphs = load_data()
    print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
    classifier = Classifier()
    if cmd_args.mode == 'gpu':
        classifier = classifier.cuda()
    optimizer = optim.Adam(classifier.parameters(), lr=cmd_args.learning_rate)
    train_idxes = list(range(len(train_graphs)))
    best_loss = None
    for epoch in range(cmd_args.num_epochs):
        # Reshuffle training order each epoch; evaluate on the fixed test split.
        random.shuffle(train_idxes)
        avg_loss = loop_dataset(train_graphs, classifier, train_idxes, optimizer=optimizer)
        print('\033[92maverage training of epoch %d: loss %.5f acc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1]))
        test_loss = loop_dataset(test_graphs, classifier, list(range(len(test_graphs))))
        print('\033[93maverage test of epoch %d: loss %.5f acc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1]))
        # Best-model checkpointing was disabled upstream; kept for reference.
        # if best_loss is None or test_loss[0] < best_loss:
        #     best_loss = test_loss[0]
        #     print('----saving to best model since this is the best valid loss so far.----')
        #     torch.save(classifier.state_dict(), cmd_args.save_dir + '/epoch-best.model')
| 4,506 | 35.346774 | 116 | py |
GXN | GXN-main/pytorch_structure2vec-master/graph_classification/util.py | from __future__ import print_function
import numpy as np
import random
from tqdm import tqdm
import os
import cPickle as cp
import networkx as nx
import argparse
# Command-line options for the structure2vec graph-classification example.
cmd_opt = argparse.ArgumentParser(description='Argparser for graph_classification')
cmd_opt.add_argument('-mode', default='cpu', help='cpu/gpu')
cmd_opt.add_argument('-gm', default='mean_field', help='mean_field/loopy_bp')
cmd_opt.add_argument('-data', default=None, help='data folder name')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-feat_dim', type=int, default=0, help='dimension of node feature')
cmd_opt.add_argument('-num_class', type=int, default=0, help='#classes')
cmd_opt.add_argument('-fold', type=int, default=1, help='fold (1..10)')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=int, default=64, help='dimension of latent layers')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='s2v output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of regression')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
# parse_known_args() tolerates extra flags consumed by other tools sharing argv.
cmd_args, _ = cmd_opt.parse_known_args()
print(cmd_args)
class S2VGraph(object):
    """Graph container for structure2vec: node tags, label, flat edge array.

    Assumes the graph has at least one edge (zip(*edges) fails otherwise).
    """
    def __init__(self, g, node_tags, label):
        self.num_nodes = len(node_tags)
        self.node_tags = node_tags
        self.label = label
        src, dst = zip(*g.edges())
        self.num_edges = len(src)
        # Flattened int32 (src0, dst0, src1, dst1, ...) for the C library.
        pairs = np.ndarray(shape=(self.num_edges, 2), dtype=np.int32)
        pairs[:, 0] = src
        pairs[:, 1] = dst
        self.edge_pairs = pairs.flatten()
def load_data():
    """Parse ./data/<data>/<data>.txt into S2VGraph objects and split by fold.

    File layout: first line is the number of graphs; each graph starts with
    "<n_nodes> <label>", followed by one line per node:
    "<tag> <n_neighbors> <neighbor ids...>".
    Side effects: sets cmd_args.num_class and cmd_args.feat_dim.
    Returns (train_graphs, test_graphs) per the precomputed 10-fold indices.
    """
    print('loading data')
    g_list = []
    label_dict = {}   # raw graph label -> contiguous class index
    feat_dict = {}    # raw node tag -> contiguous tag index
    with open('./data/%s/%s.txt' % (cmd_args.data, cmd_args.data), 'r') as f:
        n_g = int(f.readline().strip())
        for i in range(n_g):
            row = f.readline().strip().split()
            n, l = [int(w) for w in row]
            if not l in label_dict:
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                row = [int(w) for w in row]
                if not row[0] in feat_dict:
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
            # Each undirected edge is listed once from each endpoint.
            assert len(g.edges()) * 2 == n_edges
            assert len(g) == n
            g_list.append(S2VGraph(g, node_tags, l))
    for g in g_list:
        g.label = label_dict[g.label]  # remap raw labels to 0..num_class-1
    cmd_args.num_class = len(label_dict)
    cmd_args.feat_dim = len(feat_dict)
    print('# classes: %d' % cmd_args.num_class)
    print('# node features: %d' % cmd_args.feat_dim)
    train_idxes = np.loadtxt('./data/%s/10fold_idx/train_idx-%d.txt' % (cmd_args.data, cmd_args.fold), dtype=np.int32).tolist()
    test_idxes = np.loadtxt('./data/%s/10fold_idx/test_idx-%d.txt' % (cmd_args.data, cmd_args.fold), dtype=np.int32).tolist()
    return [g_list[i] for i in train_idxes], [g_list[i] for i in test_idxes]
| 3,574 | 39.625 | 127 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/s2v_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
class _s2v_lib(object):
    """ctypes wrapper around the compiled libs2v.so structure2vec library.

    Builds the sparse message-passing matrices for a minibatch of graphs by
    calling into C++ code that fills pre-allocated torch tensor buffers.
    """
    def __init__(self, args):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libs2v.so' % dir_path)
        # Declare C return types so ctypes does not truncate pointers.
        self.lib.GetGraphStruct.restype = ctypes.c_void_p
        self.lib.PrepareBatchGraph.restype = ctypes.c_int
        self.lib.PrepareMeanField.restype = ctypes.c_int
        self.lib.PrepareLoopyBP.restype = ctypes.c_int
        self.lib.NumEdgePairs.restype = ctypes.c_int
        if sys.version_info[0] > 2:
            args = [arg.encode() for arg in args]  # str -> bytes for each element in args
        arr = (ctypes.c_char_p * len(args))()
        arr[:] = args
        # Forward argv so the C side can read its own flags.
        self.lib.Init(len(args), arr)
        # Opaque handle to the C-side batch-graph struct, reused per batch.
        self.batch_graph_handle = ctypes.c_void_p(self.lib.GetGraphStruct())
    def _prepare_graph(self, graph_list, is_directed=0):
        """Upload a batch of graphs to the C side; return total node/edge counts."""
        edgepair_list = (ctypes.c_void_p * len(graph_list))()
        list_num_nodes = np.zeros((len(graph_list), ), dtype=np.int32)
        list_num_edges = np.zeros((len(graph_list), ), dtype=np.int32)
        for i in range(len(graph_list)):
            # edge_pairs may already be a raw pointer or a flattened int32 array.
            if type(graph_list[i].edge_pairs) is ctypes.c_void_p:
                edgepair_list[i] = graph_list[i].edge_pairs
            elif type(graph_list[i].edge_pairs) is np.ndarray:
                edgepair_list[i] = ctypes.c_void_p(graph_list[i].edge_pairs.ctypes.data)
            else:
                raise NotImplementedError
            list_num_nodes[i] = graph_list[i].num_nodes
            list_num_edges[i] = graph_list[i].num_edges
        total_num_nodes = np.sum(list_num_nodes)
        total_num_edges = np.sum(list_num_edges)
        self.lib.PrepareBatchGraph(self.batch_graph_handle,
                                   len(graph_list),
                                   ctypes.c_void_p(list_num_nodes.ctypes.data),
                                   ctypes.c_void_p(list_num_edges.ctypes.data),
                                   ctypes.cast(edgepair_list, ctypes.c_void_p),
                                   is_directed)
        return total_num_nodes, total_num_edges
    def PrepareMeanField(self, graph_list, is_directed=0):
        """Return (n2n, e2n, subgraph) sparse matrices for mean-field updates.

        Buffers are allocated here and filled in place by the C library; each
        undirected edge is represented in both directions.
        """
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        n2n_idxes = torch.LongTensor(2,  total_num_edges * 2)
        n2n_vals = torch.FloatTensor(total_num_edges * 2)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 3)()
        idx_list[0] = n2n_idxes.numpy().ctypes.data
        idx_list[1] = e2n_idxes.numpy().ctypes.data
        idx_list[2] = subg_idxes.numpy().ctypes.data
        val_list = (ctypes.c_void_p * 3)()
        val_list[0] = n2n_vals.numpy().ctypes.data
        val_list[1] = e2n_vals.numpy().ctypes.data
        val_list[2] = subg_vals.numpy().ctypes.data
        self.lib.PrepareMeanField(self.batch_graph_handle,
                                  ctypes.cast(idx_list, ctypes.c_void_p),
                                  ctypes.cast(val_list, ctypes.c_void_p))
        n2n_sp = torch.sparse.FloatTensor(n2n_idxes, n2n_vals, torch.Size([total_num_nodes, total_num_nodes]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2n_sp, e2n_sp, subg_sp
    def PrepareLoopyBP(self, graph_list, is_directed=0):
        """Return (n2e, e2e, e2n, subgraph) sparse matrices for loopy BP.

        Same buffer-filling scheme as PrepareMeanField, plus the
        edge-to-edge matrix sized by the C-side edge-pair count.
        """
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        total_edge_pairs = self.lib.NumEdgePairs(self.batch_graph_handle)
        n2e_idxes = torch.LongTensor(2,  total_num_edges * 2)
        n2e_vals = torch.FloatTensor(total_num_edges * 2)
        e2e_idxes = torch.LongTensor(2, total_edge_pairs)
        e2e_vals = torch.FloatTensor(total_edge_pairs)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 4)()
        idx_list[0] = ctypes.c_void_p(n2e_idxes.numpy().ctypes.data)
        idx_list[1] = ctypes.c_void_p(e2e_idxes.numpy().ctypes.data)
        idx_list[2] = ctypes.c_void_p(e2n_idxes.numpy().ctypes.data)
        idx_list[3] = ctypes.c_void_p(subg_idxes.numpy().ctypes.data)
        val_list = (ctypes.c_void_p * 4)()
        val_list[0] = ctypes.c_void_p(n2e_vals.numpy().ctypes.data)
        val_list[1] = ctypes.c_void_p(e2e_vals.numpy().ctypes.data)
        val_list[2] = ctypes.c_void_p(e2n_vals.numpy().ctypes.data)
        val_list[3] = ctypes.c_void_p(subg_vals.numpy().ctypes.data)
        self.lib.PrepareLoopyBP(self.batch_graph_handle,
                                ctypes.cast(idx_list, ctypes.c_void_p),
                                ctypes.cast(val_list, ctypes.c_void_p))
        n2e_sp = torch.sparse.FloatTensor(n2e_idxes, n2e_vals, torch.Size([total_num_edges * 2, total_num_nodes]))
        e2e_sp = torch.sparse.FloatTensor(e2e_idxes, e2e_vals, torch.Size([total_num_edges * 2, total_num_edges * 2]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2e_sp, e2e_sp, e2n_sp, subg_sp
# Instantiate the ctypes wrapper only if the compiled shared library exists;
# otherwise expose None so importing this module never fails.
dll_path = '%s/build/dll/libs2v.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
    S2VLIB = _s2v_lib(sys.argv)
else:
    S2VLIB = None
# Smoke test: load the harvard_cep molecule data and exercise PrepareLoopyBP.
if __name__ == '__main__':
    sys.path.append('%s/../harvard_cep' % os.path.dirname(os.path.realpath(__file__)))
    from util import resampling_idxes, load_raw_data
    from mol_lib import MOLLIB, MolGraph
    raw_data_dict = load_raw_data()
    test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])
    batch_graph = test_data[0:10]
    S2VLIB.PrepareLoopyBP(batch_graph)
| 6,308 | 43.429577 | 118 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/embedding.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from s2v_lib import S2VLIB
from pytorch_util import weights_init, gnn_spmm
class EmbedMeanField(nn.Module):
    """structure2vec embedding computed with mean-field message passing.

    Args:
        latent_dim: width of the message-passing hidden state.
        output_dim: final embedding size; 0 keeps latent_dim width.
        num_node_feats / num_edge_feats: input feature dimensions.
        max_lv: number of message-passing rounds.
    """
    def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats, max_lv = 3):
        super(EmbedMeanField, self).__init__()
        self.latent_dim = latent_dim
        self.output_dim = output_dim
        self.num_node_feats = num_node_feats
        self.num_edge_feats = num_edge_feats
        self.max_lv = max_lv
        self.w_n2l = nn.Linear(num_node_feats, latent_dim)
        if num_edge_feats > 0:
            self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        if output_dim > 0:
            self.out_params = nn.Linear(latent_dim, output_dim)
        self.conv_params = nn.Linear(latent_dim, latent_dim)
        weights_init(self)
    def forward(self, graph_list, node_feat, edge_feat):
        """Return one embedding per graph in graph_list."""
        # Sparse matrices come from the C batch-graph library.
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        if type(node_feat) is torch.cuda.FloatTensor:
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp)
        return h
    def mean_field(self, node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp):
        """Run max_lv rounds of mean-field updates, then pool per graph."""
        input_node_linear = self.w_n2l(node_feat)
        input_message = input_node_linear
        if edge_feat is not None:
            # Pool edge features onto their endpoint nodes.
            input_edge_linear = self.w_e2l(edge_feat)
            e2npool_input = gnn_spmm(e2n_sp, input_edge_linear)
            input_message += e2npool_input
        input_potential = F.relu(input_message)
        lv = 0
        cur_message_layer = input_potential
        while lv < self.max_lv:
            # Aggregate neighbor states and re-inject the input message.
            n2npool = gnn_spmm(n2n_sp, cur_message_layer)
            node_linear = self.conv_params( n2npool )
            merged_linear = node_linear + input_message
            cur_message_layer = F.relu(merged_linear)
            lv += 1
        if self.output_dim > 0:
            out_linear = self.out_params(cur_message_layer)
            reluact_fp = F.relu(out_linear)
        else:
            reluact_fp = cur_message_layer
        # Sum node states within each graph (subgraph pooling matrix).
        y_potential = gnn_spmm(subg_sp, reluact_fp)
        return F.relu(y_potential)
class EmbedLoopyBP(nn.Module):
    """structure2vec graph embedding computed with loopy belief propagation.

    Unlike EmbedMeanField, messages live on directed edges; after max_lv
    rounds they are pooled onto nodes and then onto subgraphs.
    """
    def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats, max_lv = 3):
        super(EmbedLoopyBP, self).__init__()
        self.latent_dim = latent_dim
        self.max_lv = max_lv
        self.w_n2l = nn.Linear(num_node_feats, latent_dim)
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        self.out_params = nn.Linear(latent_dim, output_dim)
        self.conv_params = nn.Linear(latent_dim, latent_dim)
        weights_init(self)
    def forward(self, graph_list, node_feat, edge_feat):
        """Build sparse propagation matrices for the batch and run loopy BP."""
        n2e_sp, e2e_sp, e2n_sp, subg_sp = S2VLIB.PrepareLoopyBP(graph_list)
        # Move sparse matrices to GPU when the features already live there.
        if type(node_feat) is torch.cuda.FloatTensor:
            n2e_sp = n2e_sp.cuda()
            e2e_sp = e2e_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        edge_feat = Variable(edge_feat)
        n2e_sp = Variable(n2e_sp)
        e2e_sp = Variable(e2e_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        h = self.loopy_bp(node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp, subg_sp)
        return h
    def loopy_bp(self, node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp, subg_sp):
        """Edge-message propagation loop followed by edge->node->graph pooling."""
        input_node_linear = self.w_n2l(node_feat)
        input_edge_linear = self.w_e2l(edge_feat)
        # Scatter source-node features onto their outgoing edges.
        n2epool_input = gnn_spmm(n2e_sp, input_node_linear)
        input_message = input_edge_linear + n2epool_input
        input_potential = F.relu(input_message)
        lv = 0
        cur_message_layer = input_potential
        while lv < self.max_lv:
            # Aggregate messages from adjacent edges; re-add input as a skip term.
            e2epool = gnn_spmm(e2e_sp, cur_message_layer)
            edge_linear = self.conv_params(e2epool)
            merged_linear = edge_linear + input_message
            cur_message_layer = F.relu(merged_linear)
            lv += 1
        # Pool edge messages onto nodes, then nodes onto subgraphs.
        e2npool = gnn_spmm(e2n_sp, cur_message_layer)
        hidden_msg = F.relu(e2npool)
        out_linear = self.out_params(hidden_msg)
        reluact_fp = F.relu(out_linear)
        y_potential = gnn_spmm(subg_sp, reluact_fp)
        return F.relu(y_potential)
| 4,855 | 33.685714 | 91 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/pytorch_util.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from s2v_lib import S2VLIB
def glorot_uniform(t):
    """Fill tensor t in place with Glorot/Xavier uniform noise.

    Samples U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)),
    where the fans are derived from t's shape.
    """
    dims = t.size()
    if len(dims) == 2:
        fan_in, fan_out = dims
    elif len(dims) == 3:
        # Conv1d weight layout: (out_channels, in_channels, kernel_size).
        fan_in = dims[1] * dims[2]
        fan_out = dims[0] * dims[2]
    else:
        # Fallback: treat the whole tensor size as both fans.
        fan_in = fan_out = np.prod(dims)
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    t.uniform_(-limit, limit)
def _param_init(m):
    """Initialize one module or parameter: Glorot-uniform weights, zero biases.

    Only bare Parameters and nn.Linear modules are touched; anything else
    is left unchanged.
    """
    if isinstance(m, nn.Linear):
        glorot_uniform(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, Parameter):
        glorot_uniform(m.data)
def weights_init(m):
    """Recursively initialize every sub-module and top-level parameter of m.

    ParameterList entries are initialized individually; other modules are
    handed to _param_init directly. A second pass catches parameters that
    hang directly off m (names without a dot).
    """
    for mod in m.modules():
        if isinstance(mod, nn.ParameterList):
            for item in mod:
                _param_init(item)
        else:
            _param_init(mod)
    for name, param in m.named_parameters():
        if '.' not in name:  # parameters attached directly to m itself
            _param_init(param)
class MySpMM(torch.autograd.Function):
    """Autograd-aware sparse @ dense matrix multiply.

    forward computes sp_mat @ dense_mat.  backward propagates the gradient
    only to the dense operand (grad = sp_mat^T @ grad_output); gradients for
    the sparse matrix are unsupported and asserted against.
    """
    @staticmethod
    def forward(ctx, sp_mat, dense_mat):
        ctx.save_for_backward(sp_mat, dense_mat)
        return torch.mm(sp_mat, dense_mat)

    @staticmethod
    def backward(ctx, grad_output):
        # Fix: ctx.saved_variables was a deprecated alias that was later
        # removed from PyTorch; ctx.saved_tensors works on old and new
        # versions alike (and already yields plain tensors, so no .data).
        sp_mat, dense_mat = ctx.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        # Sparse operand must not require a gradient.
        assert not ctx.needs_input_grad[0]
        if ctx.needs_input_grad[1]:
            grad_matrix2 = Variable(torch.mm(sp_mat.t(), grad_output.data))
        return grad_matrix1, grad_matrix2
def gnn_spmm(sp_mat, dense_mat):
    """Sparse-dense matmul (sp_mat @ dense_mat) with autograd via MySpMM;
    gradients flow only to the dense operand."""
    return MySpMM.apply(sp_mat, dense_mat)
| 1,885 | 25.194444 | 80 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/mlp.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from pytorch_util import weights_init
class MLPRegression(nn.Module):
    """Two-layer MLP regression head: input -> hidden (ReLU) -> scalar."""

    def __init__(self, input_size, hidden_size):
        super(MLPRegression, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, 1)
        weights_init(self)

    def forward(self, x, y = None):
        """Return predictions; with targets y, return (pred, MAE, MSE)."""
        hidden = F.relu(self.h1_weights(x))
        pred = self.h2_weights(hidden)
        if y is None:
            return pred
        y = Variable(y)
        mae = F.l1_loss(pred, y)
        mse = F.mse_loss(pred, y)
        return pred, mae, mse
class MLPClassifier(nn.Module):
    """Two-layer MLP classifier head returning per-class log-probabilities."""

    def __init__(self, input_size, hidden_size, num_class):
        super(MLPClassifier, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        weights_init(self)

    def forward(self, x, y = None):
        """Return log-probs; with targets y, return (logits, NLL loss, accuracy)."""
        hidden = F.relu(self.h1_weights(x))
        logits = F.log_softmax(self.h2_weights(hidden), dim=1)
        if y is None:
            return logits
        y = Variable(y)
        loss = F.nll_loss(logits, y)
        pred = logits.data.max(1, keepdim=True)[1]
        acc = pred.eq(y.data.view_as(pred)).cpu().sum() / float(y.size()[0])
        return logits, loss, acc
GXN | GXN-main/pytorch_structure2vec-master/harvard_cep/main.py | import sys
import os
from mol_lib import MOLLIB, MolGraph
import torch
import random
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append('%s/../s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from embedding import EmbedMeanField, EmbedLoopyBP
from mlp import MLPRegression
from util import resampling_idxes, load_raw_data
import argparse
# Command-line options for the harvard_cep training/evaluation script.
# parse_known_args() is used so unrelated flags (e.g. from a launcher) are
# tolerated instead of causing an error.
cmd_opt = argparse.ArgumentParser(description='Argparser for harvard cep')
cmd_opt.add_argument('-saved_model', default=None, help='start from existing model')
cmd_opt.add_argument('-save_dir', default='./saved', help='save_dir')
cmd_opt.add_argument('-mode', default='gpu', help='cpu/gpu')
cmd_opt.add_argument('-gm', default='mean_field', help='mean_field/loopy_bp')
cmd_opt.add_argument('-phase', default='train', help='train/test')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-gen_depth', type=int, default=10, help='depth of generator')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=int, default=64, help='dimension of latent layers')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='s2v output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of regression')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
cmd_args, _ = cmd_opt.parse_known_args()
def loop_dataset(mol_list, regressor, sample_idxes, optimizer=None, start_iter=None, n_iters=None, bsize=cmd_args.batch_size):
    """Run one pass (or a slice of one pass) over sample_idxes in minibatches.

    When optimizer is given, performs SGD updates (training); otherwise only
    evaluates.  start_iter/n_iters restrict the pass to a window of batches.
    Returns np.array([avg_MAE, avg_RMSE]) weighted by batch sizes.
    """
    total_loss = []
    # In eval mode (optimizer is None) round up so the trailing partial batch
    # is included; in training mode the partial batch is dropped.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    if start_iter is not None:
        ed_iter = start_iter + n_iters
        if ed_iter > total_iters:
            ed_iter = total_iters
        pbar = tqdm(range(start_iter, ed_iter), unit='batch')
    else:
        pbar = tqdm(range(total_iters), unit='batch')
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize : (pos + 1) * bsize]
        batch_graph = [mol_list[idx] for idx in selected_idx]
        _, mae, mse = regressor(batch_graph)
        if optimizer is not None:
            optimizer.zero_grad()
            mse.backward()
            optimizer.step()
        # Old-PyTorch idiom for extracting scalars; on modern torch/numpy the
        # [0] index on a 0-d array would fail -- .item() is the replacement.
        mae = mae.data.cpu().numpy()[0]
        mse = mse.data.cpu().numpy()[0]
        pbar.set_description('mae: %0.5f rmse: %0.5f' % (mae, np.sqrt(mse)) )
        # Weight each batch's losses by its size for a correct overall mean.
        total_loss.append( np.array([mae, mse]) * len(selected_idx))
        n_samples += len(selected_idx)
    if optimizer is None:
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    avg_loss[1] = np.sqrt(avg_loss[1])  # convert mean MSE to RMSE
    return avg_loss
class Regressor(nn.Module):
    """End-to-end model: structure2vec graph embedding + MLP regression head.

    The embedding flavor (mean-field vs loopy BP) and all sizes come from
    the module-level cmd_args.
    """
    def __init__(self):
        super(Regressor, self).__init__()
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                        output_dim=cmd_args.out_dim,
                        num_node_feats=MOLLIB.num_node_feats,
                        num_edge_feats=MOLLIB.num_edge_feats,
                        max_lv=cmd_args.max_lv)
        self.mlp = MLPRegression(input_size=cmd_args.out_dim, hidden_size=cmd_args.hidden)
    def forward(self, batch_graph):
        """Featurize a batch of MolGraphs, embed, and regress.

        Returns (pred, mae, mse) because labels are always supplied by
        PrepareFeatureLabel."""
        node_feat, edge_feat, labels = MOLLIB.PrepareFeatureLabel(batch_graph)
        if cmd_args.mode == 'gpu':
            node_feat = node_feat.cuda()
            edge_feat = edge_feat.cuda()
            labels = labels.cuda()
        embed = self.s2v(batch_graph, node_feat, edge_feat)
        return self.mlp(embed, labels)
if __name__ == '__main__':
    # Make runs reproducible across python, numpy and torch RNGs.
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)
    raw_data_dict = load_raw_data()
    regressor = Regressor()
    if cmd_args.mode == 'gpu':
        regressor = regressor.cuda()
    # Optionally resume from a previously saved checkpoint.
    if cmd_args.saved_model is not None and cmd_args.saved_model != '':
        if os.path.isfile(cmd_args.saved_model):
            print('loading model from %s' % cmd_args.saved_model)
            if cmd_args.mode == 'cpu':
                # map_location forces GPU-saved weights onto the CPU.
                regressor.load_state_dict(torch.load(cmd_args.saved_model, map_location=lambda storage, loc: storage))
            else:
                regressor.load_state_dict(torch.load(cmd_args.saved_model))
    # Test-only mode: evaluate on the test split and exit.
    if cmd_args.phase == 'test':
        test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])
        test_loss = loop_dataset(test_data, regressor, list(range(len(test_data))))
        print('\033[93maverage test loss: mae %.5f rmse %.5f\033[0m' % (test_loss[0], test_loss[1]))
        sys.exit()
    # Training: rebalance the label distribution, then cook all splits into
    # native MolGraph objects once up front.
    train_idxes = resampling_idxes(raw_data_dict)
    cooked_data_dict = {}
    for d in raw_data_dict:
        cooked_data_dict[d] = MOLLIB.LoadMolGraph(d, raw_data_dict[d])
    optimizer = optim.Adam(regressor.parameters(), lr=cmd_args.learning_rate)
    iter_train = (len(train_idxes) + (cmd_args.batch_size - 1)) // cmd_args.batch_size
    best_valid_loss = None
    for epoch in range(cmd_args.num_epochs):
        # Validate every valid_interval training batches, not just per epoch.
        valid_interval = 10000
        for i in range(0, iter_train, valid_interval):
            avg_loss = loop_dataset(cooked_data_dict['train'], regressor, train_idxes, optimizer, start_iter=i, n_iters=valid_interval)
            print('\033[92maverage training of epoch %.2f: mae %.5f rmse %.5f\033[0m' % (epoch + min(float(i + valid_interval) / iter_train, 1.0), avg_loss[0], avg_loss[1]))
            valid_loss = loop_dataset(cooked_data_dict['valid'], regressor, list(range(len(cooked_data_dict['valid']))))
            print('\033[93maverage valid of epoch %.2f: mae %.5f rmse %.5f\033[0m' % (epoch + min(float(i + valid_interval) / iter_train, 1.0), valid_loss[0], valid_loss[1]))
            # Keep only the checkpoint with the best validation MAE.
            if best_valid_loss is None or valid_loss[0] < best_valid_loss:
                best_valid_loss = valid_loss[0]
                print('----saving to best model since this is the best valid loss so far.----')
                torch.save(regressor.state_dict(), cmd_args.save_dir + '/epoch-best.model')
        random.shuffle(train_idxes)
GXN | GXN-main/pytorch_structure2vec-master/harvard_cep/mol_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
from tqdm import tqdm
class _mol_lib(object):
    """ctypes wrapper around the compiled libmol.so molecular-graph library.

    Exposes batched featurization (PrepareFeatureLabel), feature dumping,
    and loading of pre-cooked graph binaries; num_node_feats/num_edge_feats
    are queried from the native library at construction time.
    """
    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libmol.so' % dir_path)
        # self.lib.Smiles2Graph.restype = ctypes.c_void_p
        # Declare return types so ctypes converts values correctly.
        self.lib.PrepareBatchFeature.restype = ctypes.c_int
        self.lib.DumpFeatures.restype = ctypes.c_int
        self.lib.LoadMolGraph.restype = ctypes.c_int
        self.lib.NodeFeatDim.restype = ctypes.c_int
        self.lib.EdgeFeatDim.restype = ctypes.c_int
        self.lib.NumNodes.restype = ctypes.c_int
        self.lib.NumEdges.restype = ctypes.c_int
        self.lib.EdgeList.restype = ctypes.c_void_p
        self.num_node_feats = self.lib.NodeFeatDim()
        self.num_edge_feats = self.lib.EdgeFeatDim()
    def PrepareFeatureLabel(self, molgraph_list):
        """Build (node_feat, edge_feat, label) torch tensors for a batch.

        Node/edge feature rows for all graphs are concatenated; each edge
        contributes two rows (one per direction). The native library fills
        the buffers in place through their raw data pointers."""
        c_list = (ctypes.c_void_p * len(molgraph_list))()
        total_num_nodes = 0
        total_num_edges = 0
        for i in range(len(molgraph_list)):
            c_list[i] = molgraph_list[i].handle
            total_num_nodes += molgraph_list[i].num_nodes
            total_num_edges += molgraph_list[i].num_edges
        torch_node_feat = torch.zeros(total_num_nodes, self.num_node_feats)
        torch_edge_feat = torch.zeros(total_num_edges * 2, self.num_edge_feats)
        torch_label = torch.zeros(len(molgraph_list), 1)
        # NumPy views share memory with the torch tensors, so the C side
        # writes directly into the tensors.
        node_feat = torch_node_feat.numpy()
        edge_feat = torch_edge_feat.numpy()
        label = torch_label.numpy()
        self.lib.PrepareBatchFeature(len(molgraph_list), ctypes.cast(c_list, ctypes.c_void_p),
                                    ctypes.c_void_p(node_feat.ctypes.data),
                                    ctypes.c_void_p(edge_feat.ctypes.data))
        for i in range(len(molgraph_list)):
            label[i] = molgraph_list[i].pce
        return torch_node_feat, torch_edge_feat, torch_label
    def DumpFeatures(self, fname):
        """Ask the native library to write the feature dump for fname.
        NOTE(review): passing a Python str through ctypes.cast targets
        Python 2; on Python 3 this would need bytes/c_char_p."""
        p = ctypes.cast(fname, ctypes.c_char_p)
        self.lib.DumpFeatures(p)
    def LoadMolGraph(self, phase, str_pce_tuples):
        """Load pre-cooked graphs from data/<phase>.txt.bin and pair them with
        their (smiles, pce) tuples; returns a list of MolGraph wrappers."""
        fname = 'data/%s.txt.bin' % phase
        assert os.path.isfile(fname)
        fname = ctypes.cast(fname, ctypes.c_char_p)
        num_graphs = len(str_pce_tuples)
        c_list = (ctypes.c_void_p * num_graphs)()
        t = self.lib.LoadMolGraph(fname, ctypes.cast(c_list, ctypes.c_void_p))
        # Sanity check: the binary file must match the raw-text tuple count.
        assert t == num_graphs
        molgraph_list = []
        for i in tqdm(range(0, t)):
            g = MolGraph(c_list[i], str_pce_tuples[i][0], str_pce_tuples[i][1])
            molgraph_list.append(g)
        return molgraph_list
    # def __CtypeNetworkX(self, g):
    #     edges = g.edges()
    #     e_list_from = (ctypes.c_int * len(edges))()
    #     e_list_to = (ctypes.c_int * len(edges))()
    #     if len(edges):
    #         a, b = zip(*edges)
    #         e_list_from[:] = a
    #         e_list_to[:] = b
    #     return (len(g.nodes()), len(edges), ctypes.cast(e_list_from, ctypes.c_void_p), ctypes.cast(e_list_to, ctypes.c_void_p))
    # def TakeSnapshot(self):
    #     self.lib.UpdateSnapshot()
    # def ClearTrainGraphs(self):
    #     self.ngraph_train = 0
    #     self.lib.ClearTrainGraphs()
    # def InsertGraph(self, g, is_test):
    #     n_nodes, n_edges, e_froms, e_tos = self.__CtypeNetworkX(g)
    #     if is_test:
    #         t = self.ngraph_test
    #         self.ngraph_test += 1
    #     else:
    #         t = self.ngraph_train
    #         self.ngraph_train += 1
    #     self.lib.InsertGraph(is_test, t, n_nodes, n_edges, e_froms, e_tos)
    # def LoadModel(self, path_to_model):
    #     p = ctypes.cast(path_to_model, ctypes.c_char_p)
    #     self.lib.LoadModel(p)
    # def GetSol(self, gid, maxn):
    #     sol = (ctypes.c_int * (maxn + 10))()
    #     val = self.lib.GetSol(gid, sol)
    #     return val, sol
# Instantiate the ctypes wrapper only when the shared library has been built;
# otherwise export None placeholders so the module stays importable.
dll_path = '%s/build/dll/libmol.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
    MOLLIB = _mol_lib()

    # Fix: this class must live inside the `if` branch (the source had lost
    # its indentation, leaving a dangling `else:` -- a SyntaxError).
    class MolGraph(object):
        """Thin Python handle around a native molecular-graph object.

        Wraps the opaque pointer returned by the library, caches node/edge
        counts queried from it, and carries the regression target `pce`.
        """
        def __init__(self, handle, smiles, pce):
            self.smiles = smiles
            self.handle = ctypes.c_void_p(handle)
            self.num_nodes = MOLLIB.lib.NumNodes(self.handle)
            self.num_edges = MOLLIB.lib.NumEdges(self.handle)
            # self.edge_pairs = np.ctypeslib.as_array(MOLLIB.lib.EdgeList(self.handle), shape=( self.num_edges * 2, ))
            self.edge_pairs = ctypes.c_void_p(MOLLIB.lib.EdgeList(self.handle))
            self.pce = pce
else:
    MOLLIB = None
    MolGraph = None
if __name__ == '__main__':
    # Regenerate the binary feature dumps for every split (requires the
    # native library and the raw data/*.txt files to be present).
    MOLLIB.DumpFeatures('data/train.txt')
    MOLLIB.DumpFeatures('data/valid.txt')
    MOLLIB.DumpFeatures('data/test.txt')
GXN | GXN-main/pytorch_structure2vec-master/harvard_cep/util.py | from __future__ import print_function
import numpy as np
import random
from tqdm import tqdm
import os
import cPickle as cp
def load_raw_data():
    """Read the (SMILES, PCE) pairs of each split from ./data/<phase>.txt.

    Each line holds a SMILES string followed by a float target. Returns a
    dict mapping 'train'/'valid'/'test' to lists of (smiles, pce) tuples.
    """
    print('loading data')
    raw_data_dict = {}
    for phase in ['train', 'valid', 'test']:
        pairs = []
        with open('./data/%s.txt' % phase, 'r') as fin:
            for line in fin:
                cols = line.split()
                pairs.append((cols[0].strip(), float(cols[1].strip())))
        raw_data_dict[phase] = pairs
        print('%s: %d' % (phase, len(pairs)))
    return raw_data_dict
def find_weight_idx(lower, upper, weights, pce):
    """Return the index i of the half-open bin [lower[i], upper[i]) containing
    pce, or -1 when no bin matches. `weights` is accepted but unused."""
    n = len(lower)
    i = 0
    while i < n:
        if lower[i] <= pce < upper[i]:
            return i
        i += 1
    return -1
def resampling_idxes(d):
    """Build a resampled, shuffled index list over d['train'] that flattens
    the PCE label distribution (rare label bins are oversampled).

    Returns a list of indices into d['train']; indices may repeat.
    """
    print('resampling indices')
    labels = []
    for t in d['train']:
        labels.append(t[1])
    width = 0.05  # histogram bin width over the PCE range
    labels = np.array(labels, float)
    lower = min(labels)
    upper = max(labels)
    # Count samples per bin across [min, max).
    cnt = []
    for i in np.arange(lower, upper, width):
        num = ((labels >= i) & (labels < i + width)).sum()
        cnt.append(num)
    cnt = np.array(cnt, float)
    max_cnt = max(cnt)
    cur = lower
    # Per-bin (lo, hi, weight) where weight = max_cnt / bin_count.
    region_tuples = []
    for i in range(len(cnt)):
        region_tuples.append((cur, cur + width, max_cnt / cnt[i]))
        cur += width
    pce_values = []
    for p in d['train']:
        pce_values.append(p[1])
    lower, upper, weights = zip(*region_tuples)
    # Bucket each training index by its label bin.
    sample_idxes = {}
    output_cnt = {}
    for i in range(len(lower)):
        sample_idxes.setdefault(i, [])
        output_cnt.setdefault(i, 0)
    for i in range(len(pce_values)):
        # NOTE(review): int(pce / width) assumes the label range starts at 0;
        # if min(labels) != 0 this index may miss sample_idxes keys -- verify.
        idx = int(pce_values[i] / width)
        sample_idxes[idx].append(i)
    # Target size: largest bin size times the number of bins.
    total_samples = 0
    for i in sample_idxes:
        if len(sample_idxes[i]) > total_samples:
            total_samples = len(sample_idxes[i])
    total_samples = int(total_samples * len(weights))
    # Draw bins uniformly; walk each bin sequentially, then sample with
    # replacement once the bin is exhausted.
    train_idxes = []
    for i in tqdm(range(total_samples)):
        idx = random.randint(0, len(weights) - 1)
        if output_cnt[idx] < len(sample_idxes[idx]):
            train_idxes.append(sample_idxes[idx][output_cnt[idx]])
        else:
            sample = random.randint(0, len(sample_idxes[idx]) - 1)
            train_idxes.append(sample_idxes[idx][sample])
        output_cnt[idx] += 1
    random.shuffle(train_idxes)
    return train_idxes
| 2,341 | 26.232558 | 67 | py |
GEL | GEL-master/setup.py | import setuptools
from glob import glob
from shutil import copyfile, copytree
from setuptools import setup
from os import path,makedirs
# Package build script for PyGEL3D: stages the python sources and the
# compiled GEL shared libraries into build/pygel3d, then invokes setuptools.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Create the build directory
makedirs("build/pygel3d",exist_ok=True)
# Now copy the python files to build directory
copytree("src/PyGEL/pygel3d","build/pygel3d",dirs_exist_ok=True)
# Copy the libraries to the right place.
libs_data = []
# Collect platform-specific shared libraries (macOS/Linux/Windows).
libs = glob('build/*.dylib')+glob('build/*.so*')+glob('build/**/*.dll',recursive=True)
for lib_file in libs:
    _,fn = path.split(lib_file)
    dst = "build/pygel3d/"+fn
    copyfile(lib_file,dst)
    libs_data += [fn]
print("Found these libraries: ", libs_data)
setuptools.setup(
    name="PyGEL3D",
    version="0.3.1",
    author="Andreas Baerentzen",
    author_email="janba@dtu.dk",
    description="PyGEL 3D (Python Bindings for GEL) contains tools for polygonal mesh based geometry processing",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="http://www2.compute.dtu.dk/projects/GEL/PyGEL/",
    packages = ['pygel3d'],
    package_dir = {'':'build'},
    # Ship the native libraries alongside the python package.
    package_data = {'pygel3d':libs_data},
    install_requires = ['numpy','plotly','scipy'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Operating System :: POSIX :: Linux"
    ],)
| 1,458 | 31.422222 | 113 | py |
GEL | GEL-master/GEL_UNIX/buildPyGEL.py | #!/usr/bin/python
import os
import glob
from fabricate import *
# Compiler configuration for the PyGEL shared library (fabricate build).
flags = ['-std=c++11', '-I../src', '-lGEL', '-fPIC']
build_dir = 'build/PyGEL'
target = 'libPyGEL.so'
dirs = ['../src/PyGEL']
dependencies = ['-lGEL', '-lGL', '-lGLEW', '-lm', '-lglfw', '-lstdc++']
# Gather all .cpp sources (stored without extension; re-added when compiling).
sources = []
for dir in dirs:
    for file in glob.glob(dir + '/*.cpp'):
        base_file_name, ext = os.path.splitext(file)
        sources.append(base_file_name)
def build():
    """Default fabricate target: compile every source, then link the .so."""
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    compile()
    link()
def oname(build_dir, filename):
    """Return filename's basename relocated into build_dir."""
    base = os.path.basename(filename)
    return os.path.join(build_dir, base)
def compile():
    """Compile each .cpp source to an object file inside build_dir."""
    for source in sources:
        run('gcc', flags, '-c', source+'.cpp', '-o', oname(build_dir, source+'.o'))
def link():
    """Link all object files into the shared library target."""
    objects = [oname(build_dir, s+'.o') for s in sources]
    # Wait for all parallel compile jobs before linking.
    after()
    run('gcc', flags, '-shared', '-Wl,-unresolved-symbols=report-all', objects, '-o', oname(build_dir, target), dependencies)
def clean():
    """Remove every output fabricate has recorded."""
    autoclean()
# Dispatch to the target named on the command line, with parallel jobs.
main(parallel_ok=True, jobs=10)
GEL | GEL-master/GEL_UNIX/buildGEL.py | #!/usr/bin/python
import os
import glob
from fabricate import *
# Compiler configuration for the core GEL shared library (fabricate build).
flags = ['-std=c++11', '-I../src/GEL', '-fPIC']
build_dir = 'build/GEL'
target = 'libGEL.so'
dirs = ['../src/GEL/CGLA', '../src/GEL/GLGraphics', '../src/GEL/Geometry', '../src/GEL/HMesh', '../src/GEL/Util']
dependencies = ['-lGL', '-lGLU', '-lGLEW', '-lc', '-lm', '-lpthread', '-lstdc++']
# Gather all C/C++ sources (full paths, extensions kept).
sources = []
for dir in dirs:
    for file in glob.glob(dir + '/*.c*'):
        sources.append(file)
def build():
    """Default fabricate target: compile every source, then link the .so."""
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    compile()
    link()
def oname(build_dir, filename):
    """Return filename's basename relocated into build_dir."""
    _, leaf = os.path.split(filename)
    return os.path.join(build_dir, leaf)
def compile():
    """Compile each source file to an object file inside build_dir."""
    for source in sources:
        base, ext = os.path.splitext(source)
        run('gcc', flags, '-c', source, '-o', oname(build_dir, base+'.o'))
def link():
    """Link all object files into the shared library target."""
    objects = []
    for source in sources:
        base, ext = os.path.splitext(source)
        objects.append(oname(build_dir, base+'.o'))
    # Wait for all parallel compile jobs before linking.
    after()
    run('gcc', flags, '-shared', '-Wl,-unresolved-symbols=report-all', objects, '-o', oname(build_dir, target), dependencies)
def clean():
    """Remove every output fabricate has recorded."""
    autoclean()
# Dispatch to the target named on the command line, with parallel jobs.
main(parallel_ok=True, jobs=10)
GEL | GEL-master/GEL_UNIX/fabricate.py | #!/usr/bin/env python
"""Build tool that finds dependencies automatically for any language.
fabricate is a build tool that finds dependencies automatically for any
language. It's small and just works. No hidden stuff behind your back. It was
inspired by Bill McCloskey's make replacement, memoize, but fabricate works on
Windows as well as Linux.
Read more about how to use it and how it works on the project page:
http://code.google.com/p/fabricate/
Like memoize, fabricate is released under a "New BSD license". fabricate is
copyright (c) 2009 Brush Technology. Full text of the license is here:
http://code.google.com/p/fabricate/wiki/License
To get help on fabricate functions:
from fabricate import *
help(function)
"""
from __future__ import with_statement
# fabricate version number
__version__ = '1.26'
# if version of .deps file has changed, we know to not use it
deps_version = 2
import atexit
import optparse
import os
import platform
import re
import shlex
import stat
import subprocess
import sys
import tempfile
import time
import threading # NB uses old camelCase names for backward compatibility
# multiprocessing module only exists on Python >= 2.6
try:
import multiprocessing
except ImportError:
class MultiprocessingModule(object):
def __getattr__(self, name):
raise NotImplementedError("multiprocessing module not available, can't do parallel builds")
multiprocessing = MultiprocessingModule()
# so you can do "from fabricate import *" to simplify your build script
__all__ = ['setup', 'run', 'autoclean', 'main', 'shell', 'fabricate_version',
'memoize', 'outofdate', 'parse_options', 'after',
'ExecutionError', 'md5_hasher', 'mtime_hasher',
'Runner', 'AtimesRunner', 'StraceRunner', 'AlwaysRunner',
'SmartRunner', 'Builder']
import textwrap
__doc__ += "Exported functions are:\n" + ' ' + '\n '.join(textwrap.wrap(', '.join(__all__), 80))
FAT_atime_resolution = 24*60*60 # resolution on FAT filesystems (seconds)
FAT_mtime_resolution = 2
# NTFS resolution is < 1 ms
# We assume this is considerably more than time to run a new process
NTFS_atime_resolution = 0.0002048 # resolution on NTFS filesystems (seconds)
NTFS_mtime_resolution = 0.0002048 # is actually 0.1us but python's can be
# as low as 204.8us due to poor
# float precision when storing numbers
# as big as NTFS file times can be
# (float has 52-bit precision and NTFS
# FILETIME has 63-bit precision, so
# we've lost 11 bits = 2048)
# So we can use md5func in old and new versions of Python without warnings
try:
import hashlib
md5func = hashlib.md5
except ImportError:
import md5
md5func = md5.new
# Use json, or pickle on older Python versions if simplejson not installed
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
import cPickle
# needed to ignore the indent= argument for pickle's dump()
class PickleJson:
def load(self, f):
return cPickle.load(f)
def dump(self, obj, f, indent=None, sort_keys=None):
return cPickle.dump(obj, f)
json = PickleJson()
def printerr(message):
    """ Print given message to stderr with a line feed. """
    # Python 2 print-to-file statement; this module targets Python 2.
    print >>sys.stderr, message
class PathError(Exception):
    """ Raised when a configured build dir doesn't exist on disk. """
    pass
class ExecutionError(Exception):
    """ Raised by shell() and run() if command returns non-zero exit code. """
    pass
def args_to_list(args):
    """ Return a flat list of the given arguments for shell().

    None entries are dropped, nested iterables are flattened recursively,
    and non-string items are converted with str(). """
    arglist = []
    for arg in args:
        if arg is None:
            continue
        # Python 2: str/unicode have no __iter__, so strings are NOT
        # flattened here and fall through to the else branch.
        if hasattr(arg, '__iter__'):
            arglist.extend(args_to_list(arg))
        else:
            if not isinstance(arg, basestring):
                arg = str(arg)
            arglist.append(arg)
    return arglist
def shell(*args, **kwargs):
    r""" Run a command: program name is given in first arg and command line
        arguments in the rest of the args. Iterables (lists and tuples) in args
        are recursively converted to separate arguments, non-string types are
        converted with str(arg), and None is ignored. For example:
            >>> def tail(input, n=3, flags=None):
            >>>     args = ['-n', n]
            >>>     return shell('tail', args, flags, input=input)
            >>> tail('a\nb\nc\nd\ne\n')
            'c\nd\ne\n'
            >>> tail('a\nb\nc\nd\ne\n', 2, ['-v'])
            '==> standard input <==\nd\ne\n'
        Keyword arguments kwargs are interpreted as follows:
        "input" is a string to pass standard input into the process (or the
            default of None to use parent's stdin, eg: the keyboard)
        "silent" is True (default) to return process's standard output as a
            string, or False to print it as it comes out
        "shell" set to True will run the command via the shell (/bin/sh or
            COMSPEC) instead of running the command directly (the default)
        "ignore_status" set to True means ignore command status code -- i.e.,
            don't raise an ExecutionError on nonzero status code
        Any other kwargs are passed directly to subprocess.Popen
        Raises ExecutionError(message, output, status) if the command returns
        a non-zero status code. """
    try:
        return _shell(args, **kwargs)
    finally:
        # Flush both streams even on error so output ordering stays sane.
        sys.stderr.flush()
        sys.stdout.flush()
def _shell(args, input=None, silent=True, shell=False, ignore_status=False, **kwargs):
    """ Implementation behind shell(): spawn the subprocess and collect its
    output. See shell() for the meaning of the keyword arguments. """
    if input:
        stdin = subprocess.PIPE
    else:
        stdin = None
    if silent:
        stdout = subprocess.PIPE
    else:
        stdout = None
    arglist = args_to_list(args)
    if not arglist:
        raise TypeError('shell() takes at least 1 argument (0 given)')
    if shell:
        # handle subprocess.Popen quirk where subsequent args are passed
        # to bash instead of to our command
        command = subprocess.list2cmdline(arglist)
    else:
        command = arglist
    try:
        # stderr is merged into stdout so callers get one interleaved stream.
        proc = subprocess.Popen(command, stdin=stdin, stdout=stdout,
                                stderr=subprocess.STDOUT, shell=shell, **kwargs)
    except OSError, e:
        # (Python 2 except syntax.)
        # Work around the problem that Windows Popen doesn't say what file it couldn't find
        if platform.system() == 'Windows' and e.errno == 2 and e.filename is None:
            e.filename = arglist[0]
        raise e
    output, stderr = proc.communicate(input)
    status = proc.wait()
    if status and not ignore_status:
        raise ExecutionError('%r exited with status %d'
                             % (os.path.basename(arglist[0]), status),
                             output, status)
    if silent:
        return output
def md5_hasher(filename):
    """ Return MD5 hash of given filename if it is a regular file or
        a symlink with a hashable target, or the MD5 hash of the
        target_filename if it is a symlink without a hashable target,
        or the MD5 hash of the filename if it is a directory, or None
        if file doesn't exist.

        Note: Pyhton versions before 3.2 do not support os.readlink on
        Windows so symlinks without a hashable target fall back to
        a hash of the filename if the symlink target is a directory,
        or None if the symlink is broken"""
    try:
        with open(filename, 'rb') as f:
            contents = f.read()
        return md5func(contents).hexdigest()
    except IOError:
        # Unreadable: try the symlink target, then the directory name.
        if hasattr(os, 'readlink') and os.path.islink(filename):
            return md5func(os.readlink(filename)).hexdigest()
        if os.path.isdir(filename):
            return md5func(filename).hexdigest()
        return None
def mtime_hasher(filename):
    """ Return modification time of file, or None if file doesn't exist. """
    try:
        mtime = os.stat(filename).st_mtime
    except (IOError, OSError):
        return None
    return repr(mtime)
class RunnerUnsupportedException(Exception):
    """ Exception raise by Runner constructor if it is not supported
        on the current platform."""
    pass
class Runner(object):
    """ Abstract base for dependency-tracking command runners. """
    def __call__(self, *args, **kwargs):
        """ Run command and return (dependencies, outputs), where
            dependencies is a list of the filenames of files that the
            command depended on, and output is a list of the filenames
            of files that the command modified. The input is passed
            to shell()"""
        raise NotImplementedError("Runner subclass called but subclass didn't define __call__")

    def actual_runner(self):
        """ Return the actual runner object (overriden in SmartRunner). """
        return self

    def ignore(self, name):
        """ Return a match object if name matches the builder's ignore regex. """
        return self._builder.ignore.search(name)
class AtimesRunner(Runner):
def __init__(self, builder):
self._builder = builder
self.atimes = AtimesRunner.has_atimes(self._builder.dirs)
if self.atimes == 0:
raise RunnerUnsupportedException(
'atimes are not supported on this platform')
@staticmethod
def file_has_atimes(filename):
""" Return whether the given filesystem supports access time updates for
this file. Return:
- 0 if no a/mtimes not updated
- 1 if the atime resolution is at least one day and
the mtime resolution at least 2 seconds (as on FAT filesystems)
- 2 if the atime and mtime resolutions are both < ms
(NTFS filesystem has 100 ns resolution). """
def access_file(filename):
""" Access (read a byte from) file to try to update its access time. """
f = open(filename)
f.read(1)
f.close()
initial = os.stat(filename)
os.utime(filename, (
initial.st_atime-FAT_atime_resolution,
initial.st_mtime-FAT_mtime_resolution))
adjusted = os.stat(filename)
access_file(filename)
after = os.stat(filename)
# Check that a/mtimes actually moved back by at least resolution and
# updated by a file access.
# add NTFS_atime_resolution to account for float resolution factors
# Comment on resolution/2 in atimes_runner()
if initial.st_atime-adjusted.st_atime > FAT_atime_resolution+NTFS_atime_resolution or \
initial.st_mtime-adjusted.st_mtime > FAT_mtime_resolution+NTFS_atime_resolution or \
initial.st_atime==adjusted.st_atime or \
initial.st_mtime==adjusted.st_mtime or \
not after.st_atime-FAT_atime_resolution/2 > adjusted.st_atime:
return 0
os.utime(filename, (
initial.st_atime-NTFS_atime_resolution,
initial.st_mtime-NTFS_mtime_resolution))
adjusted = os.stat(filename)
# Check that a/mtimes actually moved back by at least resolution
# Note: != comparison here fails due to float rounding error
# double NTFS_atime_resolution to account for float resolution factors
if initial.st_atime-adjusted.st_atime > NTFS_atime_resolution*2 or \
initial.st_mtime-adjusted.st_mtime > NTFS_mtime_resolution*2 or \
initial.st_atime==adjusted.st_atime or \
initial.st_mtime==adjusted.st_mtime:
return 1
return 2
@staticmethod
def exists(path):
if not os.path.exists(path):
# Note: in linux, error may not occur: strace runner doesn't check
raise PathError("build dirs specified a non-existant path '%s'" % path)
@staticmethod
def has_atimes(paths):
""" Return whether a file created in each path supports atimes and mtimes.
Return value is the same as used by file_has_atimes
Note: for speed, this only tests files created at the top directory
of each path. A safe assumption in most build environments.
In the unusual case that any sub-directories are mounted
on alternate file systems that don't support atimes, the build may
fail to identify a dependency """
atimes = 2 # start by assuming we have best atimes
for path in paths:
AtimesRunner.exists(path)
handle, filename = tempfile.mkstemp(dir=path)
try:
try:
f = os.fdopen(handle, 'wb')
except:
os.close(handle)
raise
try:
f.write('x') # need a byte in the file for access test
finally:
f.close()
atimes = min(atimes, AtimesRunner.file_has_atimes(filename))
finally:
os.remove(filename)
return atimes
def _file_times(self, path, depth):
""" Helper function for file_times().
Return a dict of file times, recursing directories that don't
start with self._builder.ignoreprefix """
AtimesRunner.exists(path)
names = os.listdir(path)
times = {}
ignoreprefix = self._builder.ignoreprefix
for name in names:
if ignoreprefix and name.startswith(ignoreprefix):
continue
if path == '.':
fullname = name
else:
fullname = os.path.join(path, name)
st = os.stat(fullname)
if stat.S_ISDIR(st.st_mode):
if depth > 1:
times.update(self._file_times(fullname, depth-1))
elif stat.S_ISREG(st.st_mode):
times[fullname] = st.st_atime, st.st_mtime
return times
def file_times(self):
""" Return a dict of "filepath: (atime, mtime)" entries for each file
in self._builder.dirs. "filepath" is the absolute path, "atime" is
the access time, "mtime" the modification time.
Recurse directories that don't start with
self._builder.ignoreprefix and have depth less than
self._builder.dirdepth. """
times = {}
for path in self._builder.dirs:
times.update(self._file_times(path, self._builder.dirdepth))
return times
    def _utime(self, filename, atime, mtime):
        """ Call os.utime but ignore permission errors """
        try:
            os.utime(filename, (atime, mtime))
        except OSError, e:
            # ignore permission errors -- we can't build with files
            # that we can't access anyway
            # (errno 1 is EPERM; any other error is re-raised)
            if e.errno != 1:
                raise
    def _age_atimes(self, filetimes):
        """ Age files' atimes and mtimes to be at least FAT_xx_resolution old.
            Only adjust if the given filetimes dict says it isn't that old,
            and return a new dict of filetimes with the ages adjusted.

            "filetimes" maps filename -> (atime, mtime); the input dict is
            left unmodified. """
        adjusted = {}
        now = time.time()
        for filename, entry in filetimes.iteritems():
            # entry is (atime, mtime); if either stamp is too close to "now"
            # to distinguish on a FAT-resolution filesystem, push both back
            if now-entry[0] < FAT_atime_resolution or now-entry[1] < FAT_mtime_resolution:
                entry = entry[0] - FAT_atime_resolution, entry[1] - FAT_mtime_resolution
                # permission errors are ignored inside _utime()
                self._utime(filename, entry[0], entry[1])
            adjusted[filename] = entry
        return adjusted
    def __call__(self, *args, **kwargs):
        """ Run command and return its dependencies and outputs, using before
            and after access times to determine dependencies.

            Returns (deps, outputs): lists of filenames whose atime
            (dependency) or mtime (output) advanced past the pre-run
            snapshot by more than half the applicable resolution. """
        # For Python pre-2.5, ensure os.stat() returns float atimes
        old_stat_float = os.stat_float_times()
        os.stat_float_times(True)
        originals = self.file_times()
        if self.atimes == 2:
            # full-resolution timestamps: no aging or slop needed
            befores = originals
            atime_resolution = 0
            mtime_resolution = 0
        else:
            # FAT-resolution timestamps: age them back first so that any
            # access during the command is detectable
            befores = self._age_atimes(originals)
            atime_resolution = FAT_atime_resolution
            mtime_resolution = FAT_mtime_resolution
        shell_keywords = dict(silent=False)
        shell_keywords.update(kwargs)
        shell(*args, **shell_keywords)
        afters = self.file_times()
        deps = []
        outputs = []
        for name in afters:
            if name in befores:
                # if file exists before+after && mtime changed, add to outputs
                # Note: Can't just check that atimes > than we think they were
                #       before because os might have rounded them to a later
                #       date than what we think we set them to in befores.
                #       So we make sure they're > by at least 1/2 the
                #       resolution. This will work for anything with a
                #       resolution better than FAT.
                if afters[name][1]-mtime_resolution/2 > befores[name][1]:
                    if not self.ignore(name):
                        outputs.append(name)
                elif afters[name][0]-atime_resolution/2 > befores[name][0]:
                    # otherwise add to deps if atime changed
                    if not self.ignore(name):
                        deps.append(name)
            else:
                # file created (in afters but not befores), add as output
                if not self.ignore(name):
                    outputs.append(name)
        if self.atimes < 2:
            # Restore atimes of files we didn't access: not for any functional
            # reason -- it's just to preserve the access time for the user's info
            for name in deps:
                originals.pop(name)
            for name in originals:
                original = originals[name]
                if original != afters.get(name, None):
                    self._utime(name, original[0], original[1])
        os.stat_float_times(old_stat_float) # restore stat_float_times value
        return deps, outputs
class StraceProcess(object):
    """ Book-keeping for one process observed in an strace log: its working
        directory, the files it read and wrote, and any log lines whose
        handling had to be deferred until the process is fully known. """
    def __init__(self, cwd='.', delayed=False):
        self.cwd = cwd                # working directory of the process
        self.deps = set()             # files read (dependencies)
        self.outputs = set()          # files written (outputs)
        self.delayed = delayed        # True while line matching is deferred
        self.delayed_lines = []       # log lines queued while delayed
    def add_dep(self, dep):
        """ Record a file this process read. """
        self.deps.add(dep)
    def add_output(self, output):
        """ Record a file this process wrote. """
        self.outputs.add(output)
    def add_delayed_line(self, line):
        """ Queue a log line for re-parsing once matching is un-delayed. """
        self.delayed_lines.append(line)
    def __str__(self):
        summary = (self.cwd, self.deps, self.outputs)
        return '<StraceProcess cwd=%s deps=%s outputs=%s>' % summary
def _call_strace(self, *args, **kwargs):
""" Top level function call for Strace that can be run in parallel """
return self(*args, **kwargs)
class StraceRunner(Runner):
keep_temps = False
def __init__(self, builder, build_dir=None):
self.strace_system_calls = StraceRunner.get_strace_system_calls()
if self.strace_system_calls is None:
raise RunnerUnsupportedException('strace is not available')
self._builder = builder
self.temp_count = 0
self.build_dir = os.path.abspath(build_dir or os.getcwd())
@staticmethod
def get_strace_system_calls():
""" Return None if this system doesn't have strace, otherwise
return a comma seperated list of system calls supported by strace. """
if platform.system() == 'Windows':
# even if windows has strace, it's probably a dodgy cygwin one
return None
possible_system_calls = ['open','stat', 'stat64', 'lstat', 'lstat64',
'execve','exit_group','chdir','mkdir','rename','clone','vfork',
'fork','symlink','creat']
valid_system_calls = []
try:
# check strace is installed and if it supports each type of call
for system_call in possible_system_calls:
proc = subprocess.Popen(['strace', '-e', 'trace=' + system_call], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
proc.wait()
if 'invalid system call' not in stderr:
valid_system_calls.append(system_call)
except OSError:
return None
return ','.join(valid_system_calls)
# Regular expressions for parsing of strace log
_open_re = re.compile(r'(?P<pid>\d+)\s+open\("(?P<name>[^"]*)", (?P<mode>[^,)]*)')
_stat_re = re.compile(r'(?P<pid>\d+)\s+l?stat(?:64)?\("(?P<name>[^"]*)", .*') # stat,lstat,stat64,lstat64
_execve_re = re.compile(r'(?P<pid>\d+)\s+execve\("(?P<name>[^"]*)", .*')
_creat_re = re.compile(r'(?P<pid>\d+)\s+creat\("(?P<name>[^"]*)", .*')
_mkdir_re = re.compile(r'(?P<pid>\d+)\s+mkdir\("(?P<name>[^"]*)", .*\)\s*=\s(?P<result>-?[0-9]*).*')
_rename_re = re.compile(r'(?P<pid>\d+)\s+rename\("[^"]*", "(?P<name>[^"]*)"\)')
_symlink_re = re.compile(r'(?P<pid>\d+)\s+symlink\("[^"]*", "(?P<name>[^"]*)"\)')
_kill_re = re.compile(r'(?P<pid>\d+)\s+killed by.*')
_chdir_re = re.compile(r'(?P<pid>\d+)\s+chdir\("(?P<cwd>[^"]*)"\)')
_exit_group_re = re.compile(r'(?P<pid>\d+)\s+exit_group\((?P<status>.*)\).*')
_clone_re = re.compile(r'(?P<pid_clone>\d+)\s+(clone|fork|vfork)\(.*\)\s*=\s*(?P<pid>\d*)')
# Regular expressions for detecting interrupted lines in strace log
# 3618 clone( <unfinished ...>
# 3618 <... clone resumed> child_stack=0, flags=CLONE, child_tidptr=0x7f83deffa780) = 3622
_unfinished_start_re = re.compile(r'(?P<pid>\d+)(?P<body>.*)<unfinished ...>$')
_unfinished_end_re = re.compile(r'(?P<pid>\d+)\s+\<\.\.\..*\>(?P<body>.*)')
def _do_strace(self, args, kwargs, outfile, outname):
""" Run strace on given command args/kwargs, sending output to file.
Return (status code, list of dependencies, list of outputs). """
shell_keywords = dict(silent=False)
shell_keywords.update(kwargs)
try:
shell('strace', '-fo', outname, '-e',
'trace=' + self.strace_system_calls,
args, **shell_keywords)
except ExecutionError, e:
# if strace failed to run, re-throw the exception
# we can tell this happend if the file is empty
outfile.seek(0, os.SEEK_END)
if outfile.tell() is 0:
raise e
else:
# reset the file postion for reading
outfile.seek(0)
self.status = 0
processes = {} # dictionary of processes (key = pid)
unfinished = {} # list of interrupted entries in strace log
for line in outfile:
self._match_line(line, processes, unfinished)
# collect outputs and dependencies from all processes
deps = set()
outputs = set()
for pid, process in processes.items():
deps = deps.union(process.deps)
outputs = outputs.union(process.outputs)
return self.status, list(deps), list(outputs)
def _match_line(self, line, processes, unfinished):
# look for split lines
unfinished_start_match = self._unfinished_start_re.match(line)
unfinished_end_match = self._unfinished_end_re.match(line)
if unfinished_start_match:
pid = unfinished_start_match.group('pid')
body = unfinished_start_match.group('body')
unfinished[pid] = pid + ' ' + body
return
elif unfinished_end_match:
pid = unfinished_end_match.group('pid')
body = unfinished_end_match.group('body')
line = unfinished[pid] + body
del unfinished[pid]
is_output = False
open_match = self._open_re.match(line)
stat_match = self._stat_re.match(line)
execve_match = self._execve_re.match(line)
creat_match = self._creat_re.match(line)
mkdir_match = self._mkdir_re.match(line)
symlink_match = self._symlink_re.match(line)
rename_match = self._rename_re.match(line)
clone_match = self._clone_re.match(line)
kill_match = self._kill_re.match(line)
if kill_match:
return None, None, None
match = None
if execve_match:
pid = execve_match.group('pid')
match = execve_match # Executables can be dependencies
if pid not in processes and len(processes) == 0:
# This is the first process so create dict entry
processes[pid] = StraceProcess()
elif clone_match:
pid = clone_match.group('pid')
pid_clone = clone_match.group('pid_clone')
if pid not in processes:
# Simple case where there are no delayed lines
processes[pid] = StraceProcess(processes[pid_clone].cwd)
else:
# Some line processing was delayed due to an interupted clone_match
processes[pid].cwd = processes[pid_clone].cwd # Set the correct cwd
processes[pid].delayed = False # Set that matching is no longer delayed
for delayed_line in processes[pid].delayed_lines:
# Process all the delayed lines
self._match_line(delayed_line, processes, unfinished)
processes[pid].delayed_lines = [] # Clear the lines
elif open_match:
match = open_match
mode = match.group('mode')
if 'O_WRONLY' in mode or 'O_RDWR' in mode:
# it's an output file if opened for writing
is_output = True
elif stat_match:
match = stat_match
elif creat_match:
match = creat_match
# a created file is an output file
is_output = True
elif mkdir_match:
match = mkdir_match
if match.group('result') == '0':
# a created directory is an output file
is_output = True
elif symlink_match:
match = symlink_match
# the created symlink is an output file
is_output = True
elif rename_match:
match = rename_match
# the destination of a rename is an output file
is_output = True
if match:
name = match.group('name')
pid = match.group('pid')
if not self._matching_is_delayed(processes, pid, line):
cwd = processes[pid].cwd
if cwd != '.':
name = os.path.join(cwd, name)
# normalise path name to ensure files are only listed once
name = os.path.normpath(name)
# if it's an absolute path name under the build directory,
# make it relative to build_dir before saving to .deps file
if os.path.isabs(name) and name.startswith(self.build_dir):
name = name[len(self.build_dir):]
name = name.lstrip(os.path.sep)
if (self._builder._is_relevant(name)
and not self.ignore(name)
and os.path.lexists(name)):
if is_output:
processes[pid].add_output(name)
else:
processes[pid].add_dep(name)
match = self._chdir_re.match(line)
if match:
pid = match.group('pid')
if not self._matching_is_delayed(processes, pid, line):
processes[pid].cwd = os.path.join(processes[pid].cwd, match.group('cwd'))
match = self._exit_group_re.match(line)
if match:
self.status = int(match.group('status'))
def _matching_is_delayed(self, processes, pid, line):
# Check if matching is delayed and cache a delayed line
if pid not in processes:
processes[pid] = StraceProcess(delayed=True)
process = processes[pid]
if process.delayed:
process.add_delayed_line(line)
return True
else:
return False
def __call__(self, *args, **kwargs):
""" Run command and return its dependencies and outputs, using strace
to determine dependencies (by looking at what files are opened or
modified). """
ignore_status = kwargs.pop('ignore_status', False)
if self.keep_temps:
outname = 'strace%03d.txt' % self.temp_count
self.temp_count += 1
handle = os.open(outname, os.O_CREAT)
else:
handle, outname = tempfile.mkstemp()
try:
try:
outfile = os.fdopen(handle, 'r')
except:
os.close(handle)
raise
try:
status, deps, outputs = self._do_strace(args, kwargs, outfile, outname)
if status is None:
raise ExecutionError(
'%r was killed unexpectedly' % args[0], '', -1)
finally:
outfile.close()
finally:
if not self.keep_temps:
os.remove(outname)
if status and not ignore_status:
raise ExecutionError('%r exited with status %d'
% (os.path.basename(args[0]), status),
'', status)
return list(deps), list(outputs)
class AlwaysRunner(Runner):
    """ Fallback runner used when neither strace nor atimes are available:
        it always executes the command and reports no dependency data. """
    def __init__(self, builder):
        # no state needed; dependency tracking is unavailable
        pass
    def __call__(self, *args, **kwargs):
        """ Run the given command unconditionally and return (None, None)
            since no dependencies or outputs can be determined. """
        keywords = dict(silent=False)
        keywords.update(kwargs)
        shell(*args, **keywords)
        return None, None
class SmartRunner(Runner):
    """ Smart command runner that uses StraceRunner if it can,
        otherwise AtimesRunner if available, otherwise AlwaysRunner. """
    def __init__(self, builder):
        self._builder = builder
        # try each tracked runner in order of preference; fall back to
        # AlwaysRunner when neither is supported on this system
        for runner_class in (StraceRunner, AtimesRunner):
            try:
                self._runner = runner_class(self._builder)
                break
            except RunnerUnsupportedException:
                continue
        else:
            self._runner = AlwaysRunner(self._builder)
    def actual_runner(self):
        """ Return the concrete runner instance chosen at construction. """
        return self._runner
    def __call__(self, *args, **kwargs):
        return self._runner(*args, **kwargs)
class _running(object):
    """ Represents a task put on the parallel pool
        and its results when complete """
    def __init__(self, async, command):
        """ "async" is the AsyncResult object returned from pool.apply_async
            "command" is the command that was run """
        # NOTE(review): "async" became a reserved word in Python 3.7; this
        # module is Python 2 code and keeps the historical attribute name.
        self.async = async
        self.command = command
        # filled in later by the results handler; None while still running
        self.results = None
class _after(object):
""" Represents something waiting on completion of some previous commands """
def __init__(self, afters, do):
""" "afters" is a group id or a iterable of group ids to wait on
"do" is either a tuple representing a command (group, command,
arglist, kwargs) or a threading.Condition to be released """
self.afters = afters
self.do = do
self.done = False
class _Groups(object):
""" Thread safe mapping object whose values are lists of _running
or _after objects and a count of how many have *not* completed """
class value(object):
""" the value type in the map """
def __init__(self, val=None):
self.count = 0 # count of items not yet completed.
# This also includes count_in_false number
self.count_in_false = 0 # count of commands which is assigned
# to False group, but will be moved
# to this group.
self.items = [] # items in this group
if val is not None:
self.items.append(val)
self.ok = True # True if no error from any command in group so far
def __init__(self):
self.groups = {False: self.value()}
self.lock = threading.Lock()
def item_list(self, id):
""" Return copy of the value list """
with self.lock:
return self.groups[id].items[:]
def remove(self, id):
""" Remove the group """
with self.lock:
del self.groups[id]
def remove_item(self, id, val):
with self.lock:
self.groups[id].items.remove(val)
def add(self, id, val):
with self.lock:
if id in self.groups:
self.groups[id].items.append(val)
else:
self.groups[id] = self.value(val)
self.groups[id].count += 1
def ensure(self, id):
"""if id does not exit, create it without any value"""
with self.lock:
if not id in self.groups:
self.groups[id] = self.value()
def get_count(self, id):
with self.lock:
if id not in self.groups:
return 0
return self.groups[id].count
def dec_count(self, id):
with self.lock:
c = self.groups[id].count - 1
if c < 0:
raise ValueError
self.groups[id].count = c
return c
def get_ok(self, id):
with self.lock:
return self.groups[id].ok
def set_ok(self, id, to):
with self.lock:
self.groups[id].ok = to
def ids(self):
with self.lock:
return self.groups.keys()
# modification to reserve blocked commands to corresponding groups
def inc_count_for_blocked(self, id):
with self.lock:
if not id in self.groups:
self.groups[id] = self.value()
self.groups[id].count += 1
self.groups[id].count_in_false += 1
def add_for_blocked(self, id, val):
# modification of add(), in order to move command from False group
# to actual group
with self.lock:
# id must be registered before
self.groups[id].items.append(val)
# count does not change (already considered
# in inc_count_for_blocked), but decrease count_in_false.
c = self.groups[id].count_in_false - 1
if c < 0:
raise ValueError
self.groups[id].count_in_false = c
# pool of processes to run parallel jobs, must not be part of any object that
# is pickled for transfer to these processes, ie it must be global
_pool = None
# object holding results, must also be global
_groups = _Groups()
# results collecting thread
_results = None
# event used to ask the results thread to shut down cleanly
_stop_results = threading.Event()
class _todo(object):
""" holds the parameters for commands waiting on others """
def __init__(self, group, command, arglist, kwargs):
self.group = group # which group it should run as
self.command = command # string command
self.arglist = arglist # command arguments
self.kwargs = kwargs # keywork args for the runner
def _results_handler( builder, delay=0.01):
    """ Body of thread that stores results in .deps and handles 'after'
        conditions
        "builder" the builder used
        "delay" seconds to sleep between polling passes """
    try:
        while not _stop_results.isSet():
            # go through the lists and check any results available
            for id in _groups.ids():
                if id is False: continue # key of False is _afters not _runnings
                for r in _groups.item_list(id):
                    if r.results is None and r.async.ready():
                        try:
                            d, o = r.async.get()
                        except Exception, e:
                            r.results = e
                            _groups.set_ok(id, False)
                            # NOTE(review): this py2 exception unpack assumes
                            # e.args is (message, data, status) as raised by
                            # ExecutionError -- confirm no other type escapes
                            message, data, status = e
                            printerr("fabricate: " + message)
                        else:
                            builder.done(r.command, d, o) # save deps
                            r.results = (r.command, d, o)
                        _groups.dec_count(id)
            # check if can now schedule things waiting on the after queue
            for a in _groups.item_list(False):
                still_to_do = sum(_groups.get_count(g) for g in a.afters)
                no_error = all(_groups.get_ok(g) for g in a.afters)
                if False in a.afters:
                    still_to_do -= 1 # don't count yourself of course
                if still_to_do == 0:
                    if isinstance(a.do, _todo):
                        if no_error:
                            # prerequisites succeeded: submit the parked command
                            async = _pool.apply_async(_call_strace, a.do.arglist,
                                        a.do.kwargs)
                            _groups.add_for_blocked(a.do.group, _running(async, a.do.command))
                        else:
                            # Mark the command as not done due to errors
                            r = _running(None, a.do.command)
                            _groups.add_for_blocked(a.do.group, r)
                            r.results = False;
                            _groups.set_ok(a.do.group, False)
                            _groups.dec_count(a.do.group)
                    elif isinstance(a.do, threading._Condition):
                        # is this only for threading._Condition in after()?
                        a.do.acquire()
                        # only mark as done if there is no error
                        a.done = no_error
                        a.do.notify()
                        a.do.release()
                    # else: #are there other cases?
                    _groups.remove_item(False, a)
                    _groups.dec_count(False)
            _stop_results.wait(delay)
    except Exception:
        etype, eval, etb = sys.exc_info()
        printerr("Error: exception " + repr(etype) + " at line " + str(etb.tb_lineno))
    finally:
        if not _stop_results.isSet():
            # oh dear, I am about to die for unexplained reasons, stop the whole
            # app otherwise the main thread hangs waiting on non-existant me,
            # Note: sys.exit() only kills me
            printerr("Error: unexpected results handler exit")
            os._exit(1)
class Builder(object):
    """ The Builder.
        You may supply a "runner" class to change the way commands are run
        or dependencies are determined. For an example, see:
        http://code.google.com/p/fabricate/wiki/HowtoMakeYourOwnRunner
        A "runner" must be a subclass of Runner and must have a __call__()
        function that takes a command as a list of args and returns a tuple of
        (deps, outputs), where deps is a list of rel-path'd dependency files
        and outputs is a list of rel-path'd output files. The default runner
        is SmartRunner, which automatically picks one of StraceRunner,
        AtimesRunner, or AlwaysRunner depending on your system.
        A "runner" class may have an __init__() function that takes the
        builder as a parameter.
    """
    def __init__(self, runner=None, dirs=None, dirdepth=100, ignoreprefix='.',
                 ignore=None, hasher=md5_hasher, depsname='.deps',
                 quiet=False, debug=False, inputs_only=False, parallel_ok=False):
        """ Initialise a Builder with the given options.
        "runner" specifies how programs should be run. It is either a
            callable compatible with the Runner class, or a string selecting
            one of the standard runners ("atimes_runner", "strace_runner",
            "always_runner", or "smart_runner").
        "dirs" is a list of paths to look for dependencies (or outputs) in
            if using the strace or atimes runners.
        "dirdepth" is the depth to recurse into the paths in "dirs" (default
            essentially means infinitely). Set to 1 to just look at the
            immediate paths in "dirs" and not recurse at all. This can be
            useful to speed up the AtimesRunner if you're building in a large
            tree and you don't care about all of the subdirectories.
        "ignoreprefix" prevents recursion into directories that start with
            prefix. It defaults to '.' to ignore svn directories.
            Change it to '_svn' if you use _svn hidden directories.
        "ignore" is a regular expression. Any dependency that contains a
            regex match is ignored and not put into the dependency list.
            Note that the regex may be VERBOSE (spaces are ignored and # line
            comments allowed -- use \ prefix to insert these characters)
        "hasher" is a function which returns a string which changes when
            the contents of its filename argument changes, or None on error.
            Default is md5_hasher, but can also be mtime_hasher.
        "depsname" is the name of the JSON dependency file to load/save.
        "quiet" set to True tells the builder to not display the commands being
            executed (or other non-error output).
        "debug" set to True makes the builder print debug output, such as why
            particular commands are being executed
        "inputs_only" set to True makes builder only re-build if input hashes
            have changed (ignores output hashes); use with tools that touch
            files that shouldn't cause a rebuild; e.g. g++ collect phase
        "parallel_ok" set to True to indicate script is safe for parallel running
        """
        if dirs is None:
            dirs = ['.']
        self.dirs = dirs
        self.dirdepth = dirdepth
        self.ignoreprefix = ignoreprefix
        if ignore is None:
            ignore = r'$x^' # something that can't match
        self.ignore = re.compile(ignore, re.VERBOSE)
        self.depsname = depsname
        self.hasher = hasher
        self.quiet = quiet
        self.debug = debug
        self.inputs_only = inputs_only
        self.checking = False   # True while outofdate() is probing
        self.hash_cache = {}    # filename -> hash, to avoid re-hashing
        # instantiate runner after the above have been set in case it needs them
        if runner is not None:
            self.set_runner(runner)
        elif hasattr(self, 'runner'):
            # For backwards compatibility, if a derived class has
            # defined a "runner" method then use it:
            pass
        else:
            self.runner = SmartRunner(self)
        # parallel building is only supported with the strace runner
        is_strace = isinstance(self.runner.actual_runner(), StraceRunner)
        self.parallel_ok = parallel_ok and is_strace and _pool is not None
        if self.parallel_ok:
            global _results
            _results = threading.Thread(target=_results_handler,
                                        args=[self])
            _results.setDaemon(True)
            _results.start()
            atexit.register(self._join_results_handler)
            StraceRunner.keep_temps = False # unsafe for parallel execution
    def echo(self, message):
        """ Print message, but only if builder is not in quiet mode. """
        if not self.quiet:
            print message
    def echo_command(self, command, echo=None):
        """ Show a command being executed. Also passed run's "echo" arg
            so you can override what's displayed.
        """
        if echo is not None:
            command = str(echo)
        self.echo(command)
    def echo_delete(self, filename, error=None):
        """ Show a file being deleted. For subclassing Builder and overriding
            this function, the exception is passed in if an OSError occurs
            while deleting a file. """
        if error is None:
            self.echo('deleting %s' % filename)
        else:
            self.echo_debug('error deleting %s: %s' % (filename, error.strerror))
    def echo_debug(self, message):
        """ Print message, but only if builder is in debug mode. """
        if self.debug:
            print 'DEBUG:', message
    def _run(self, *args, **kwargs):
        """ Internal worker for run(): execute the command (possibly on the
            parallel pool) if it is out of date, and record its deps. """
        after = kwargs.pop('after', None)   # group id(s) to wait for first
        group = kwargs.pop('group', True)   # group to add this command to
        echo = kwargs.pop('echo', None)
        arglist = args_to_list(args)
        if not arglist:
            raise TypeError('run() takes at least 1 argument (0 given)')
        # we want a command line string for the .deps file key and for display
        command = subprocess.list2cmdline(arglist)
        if not self.cmdline_outofdate(command):
            if self.parallel_ok:
                _groups.ensure(group)
            return command, None, None
        # if just checking up-to-date-ness, set flag and do nothing more
        self.outofdate_flag = True
        if self.checking:
            if self.parallel_ok:
                _groups.ensure(group)
            return command, None, None
        # use runner to run command and collect dependencies
        self.echo_command(command, echo=echo)
        if self.parallel_ok:
            arglist.insert(0, self.runner)
            if after is not None:
                if not hasattr(after, '__iter__'):
                    after = [after]
                # This command is registered to False group firstly,
                # but the actual group of this command should
                # count this blocked command as well as usual commands
                _groups.inc_count_for_blocked(group)
                _groups.add(False,
                            _after(after, _todo(group, command, arglist,
                                                kwargs)))
            else:
                async = _pool.apply_async(_call_strace, arglist, kwargs)
                _groups.add(group, _running(async, command))
            return None
        else:
            deps, outputs = self.runner(*arglist, **kwargs)
            return self.done(command, deps, outputs)
    def run(self, *args, **kwargs):
        """ Run command given in args with kwargs per shell(), but only if its
            dependencies or outputs have changed or don't exist. Return tuple
            of (command_line, deps_list, outputs_list) so caller or subclass
            can use them.
            Parallel operation keyword args "after" specifies a group or
            iterable of groups to wait for after they finish, "group" specifies
            the group to add this command to.
            Optional "echo" keyword arg is passed to echo_command() so you can
            override its output if you want.
        """
        try:
            return self._run(*args, **kwargs)
        finally:
            sys.stderr.flush()
            sys.stdout.flush()
    def done(self, command, deps, outputs):
        """ Store the results in the .deps file when they are available """
        if deps is not None or outputs is not None:
            deps_dict = {}
            # hash the dependency inputs and outputs
            for dep in deps:
                if dep in self.hash_cache:
                    # already hashed so don't repeat hashing work
                    hashed = self.hash_cache[dep]
                else:
                    hashed = self.hasher(dep)
                if hashed is not None:
                    deps_dict[dep] = "input-" + hashed
                    # store hash in hash cache as it may be a new file
                    self.hash_cache[dep] = hashed
            for output in outputs:
                hashed = self.hasher(output)
                if hashed is not None:
                    deps_dict[output] = "output-" + hashed
                    # update hash cache as this file should already be in
                    # there but has probably changed
                    self.hash_cache[output] = hashed
            self.deps[command] = deps_dict
        return command, deps, outputs
    def memoize(self, command, **kwargs):
        """ Run the given command, but only if its dependencies have changed --
            like run(), but returns the status code instead of raising an
            exception on error. If "command" is a string (as per memoize.py)
            it's split into args using shlex.split() in a POSIX/bash style,
            otherwise it's a list of args as per run().
            This function is for compatiblity with memoize.py and is
            deprecated. Use run() instead. """
        if isinstance(command, basestring):
            args = shlex.split(command)
        else:
            args = args_to_list(command)
        try:
            self.run(args, **kwargs)
            return 0
        except ExecutionError, exc:
            # ExecutionError args are (message, data, status)
            message, data, status = exc
            return status
    def outofdate(self, func):
        """ Return True if given build function is out of date. """
        self.checking = True
        self.outofdate_flag = False
        func()
        self.checking = False
        return self.outofdate_flag
    def cmdline_outofdate(self, command):
        """ Return True if given command line is out of date. """
        if command in self.deps:
            # command has been run before, see if deps have changed
            for dep, oldhash in self.deps[command].items():
                assert oldhash.startswith('input-') or \
                       oldhash.startswith('output-'), \
                    "%s file corrupt, do a clean!" % self.depsname
                io_type, oldhash = oldhash.split('-', 1)
                # make sure this dependency or output hasn't changed
                if dep in self.hash_cache:
                    # already hashed so don't repeat hashing work
                    newhash = self.hash_cache[dep]
                else:
                    # not in hash_cache so make sure this dependency or
                    # output hasn't changed
                    newhash = self.hasher(dep)
                    if newhash is not None:
                        # Add newhash to the hash cache
                        self.hash_cache[dep] = newhash
                if newhash is None:
                    self.echo_debug("rebuilding %r, %s %s doesn't exist" %
                                    (command, io_type, dep))
                    break
                if newhash != oldhash and (not self.inputs_only or io_type == 'input'):
                    self.echo_debug("rebuilding %r, hash for %s %s (%s) != old hash (%s)" %
                                    (command, io_type, dep, newhash, oldhash))
                    break
            else:
                # all dependencies are unchanged
                return False
        else:
            self.echo_debug('rebuilding %r, no dependency data' % command)
        # command has never been run, or one of the dependencies didn't
        # exist or had changed
        return True
    def autoclean(self):
        """ Automatically delete all outputs of this build as well as the .deps
            file. """
        # first build a list of all the outputs from the .deps file
        outputs = []
        dirs = []
        for command, deps in self.deps.items():
            outputs.extend(dep for dep, hashed in deps.items()
                           if hashed.startswith('output-'))
        outputs.append(self.depsname)
        self._deps = None
        for output in outputs:
            try:
                os.remove(output)
            except OSError, e:
                if os.path.isdir(output):
                    # cache directories to be removed once all other outputs
                    # have been removed, as they may be content of the dir
                    dirs.append(output)
                else:
                    self.echo_delete(output, e)
            else:
                self.echo_delete(output)
        # delete the directories in reverse sort order
        # this ensures that parents are removed after children
        for dir in sorted(dirs, reverse=True):
            try:
                os.rmdir(dir)
            except OSError, e:
                self.echo_delete(dir, e)
            else:
                self.echo_delete(dir)
    @property
    def deps(self):
        """ Lazy load .deps file so that instantiating a Builder is "safe". """
        if not hasattr(self, '_deps') or self._deps is None:
            self.read_deps()
            atexit.register(self.write_deps, depsname=os.path.abspath(self.depsname))
        return self._deps
    def read_deps(self):
        """ Read dependency JSON file into deps object. """
        try:
            f = open(self.depsname)
            try:
                self._deps = json.load(f)
                # make sure the version is correct
                if self._deps.get('.deps_version', 0) != deps_version:
                    printerr('Bad %s dependency file version! Rebuilding.'
                             % self.depsname)
                    self._deps = {}
                self._deps.pop('.deps_version', None)
            finally:
                f.close()
        except IOError:
            # no .deps file yet: start with an empty dependency map
            self._deps = {}
    def write_deps(self, depsname=None):
        """ Write out deps object into JSON dependency file. """
        if self._deps is None:
            return # we've cleaned so nothing to save
        self.deps['.deps_version'] = deps_version
        if depsname is None:
            depsname = self.depsname
        f = open(depsname, 'w')
        try:
            json.dump(self.deps, f, indent=4, sort_keys=True)
        finally:
            f.close()
            # drop the version key again so in-memory deps stay clean
            self._deps.pop('.deps_version', None)
    # maps runner-name strings accepted by set_runner() to runner classes
    _runner_map = {
        'atimes_runner' : AtimesRunner,
        'strace_runner' : StraceRunner,
        'always_runner' : AlwaysRunner,
        'smart_runner' : SmartRunner,
        }
    def set_runner(self, runner):
        """Set the runner for this builder. "runner" is either a Runner
           subclass (e.g. SmartRunner), or a string selecting one of the
           standard runners ("atimes_runner", "strace_runner",
           "always_runner", or "smart_runner")."""
        try:
            self.runner = self._runner_map[runner](self)
        except KeyError:
            if isinstance(runner, basestring):
                # For backwards compatibility, allow runner to be the
                # name of a method in a derived class:
                self.runner = getattr(self, runner)
            else:
                # pass builder to runner class to get a runner instance
                self.runner = runner(self)
    def _is_relevant(self, fullname):
        """ Return True if file is in the dependency search directories. """
        # need to abspath to compare rel paths with abs
        fullname = os.path.abspath(fullname)
        for path in self.dirs:
            path = os.path.abspath(path)
            # NOTE(review): plain startswith() also matches sibling dirs
            # sharing a prefix (e.g. 'src2' for path 'src') -- confirm intended
            if fullname.startswith(path):
                rest = fullname[len(path):]
                # files in dirs starting with ignoreprefix are not relevant
                if os.sep+self.ignoreprefix in os.sep+os.path.dirname(rest):
                    continue
                # files deeper than dirdepth are not relevant
                if rest.count(os.sep) > self.dirdepth:
                    continue
                return True
        return False
    def _join_results_handler(self):
        """Stops then joins the results handler thread"""
        _stop_results.set()
        _results.join()
# default Builder instance, used by helper run() and main() helper functions
default_builder = None
# name of the build function invoked when no command line arguments are given
default_command = 'build'
# save the setup arguments for use by main()
_setup_builder = None
_setup_default = None
_setup_kwargs = {}
def setup(builder=None, default=None, **kwargs):
    """ NOTE: setup functionality is now in main(), setup() is kept for
        backward compatibility and should not be used in new scripts.

        Record "builder" (a Builder class, or None for the standard one),
        "default" (the name of the default function to run when the build
        script gets no command line arguments) and any Builder() keyword
        arguments, for main() to pick up later. """
    global _setup_builder, _setup_default, _setup_kwargs
    _setup_builder, _setup_default, _setup_kwargs = builder, default, kwargs
# append the Builder() keyword documentation so help(setup) also lists the
# keyword arguments setup() forwards to the builder
setup.__doc__ += '\n\n' + Builder.__init__.__doc__
def _set_default_builder():
    """ Set default builder to Builder() instance if it's not yet set. """
    # lazy initialisation: the module-level helpers all funnel through here
    # so a plain Builder() is only constructed on first actual use
    global default_builder
    if default_builder is None:
        default_builder = Builder()
def run(*args, **kwargs):
    """ Run the given command, but only if its dependencies have changed.
        Uses the default Builder; return value as per Builder.run(). If the
        single positional argument is itself an iterable, each of its
        elements is treated as a command and a list of the Builder.run()
        results is returned.
    """
    _set_default_builder()
    # note: in Python 2 a plain string has no __iter__, so a single string
    # command takes the normal path below
    is_command_list = len(args) == 1 and hasattr(args[0], '__iter__')
    if not is_command_list:
        return default_builder.run(*args, **kwargs)
    return [default_builder.run(*command, **kwargs) for command in args[0]]
def after(*args):
    """ wait until after the specified command groups complete and return
        results, or None if not parallel """
    _set_default_builder()
    if getattr(default_builder, 'parallel_ok', False):
        if len(args) == 0:
            args = _groups.ids()  # wait on all
        cond = threading.Condition()
        cond.acquire()
        # register a sentinel that presumably notifies "cond" once every
        # requested group has completed; False = the untracked group
        a = _after(args, cond)
        _groups.add(False, a)
        cond.wait()
        if not a.done:
            # NOTE(review): "done" appears to be left False when a command
            # failed, in which case we abort the build -- confirm in _after
            sys.exit(1)
        # collect the per-item results of each requested group that exists
        results = []
        ids = _groups.ids()
        for a in args:
            if a in ids and a is not False:
                r = []
                for i in _groups.item_list(a):
                    r.append(i.results)
                results.append((a,r))
        return results
    else:
        return None
def autoclean():
    """ Automatically delete all outputs of the default build. """
    # delegate to the default Builder's autoclean
    _set_default_builder()
    default_builder.autoclean()
def memoize(command, **kwargs):
    # module-level convenience wrapper for Builder.memoize() on the default
    # builder; its docstring is copied from that method just below
    _set_default_builder()
    return default_builder.memoize(command, **kwargs)
memoize.__doc__ = Builder.memoize.__doc__
def outofdate(command):
    """ Return True if given command is out of date and needs to be run. """
    # delegate the dependency check to the default Builder
    _set_default_builder()
    return default_builder.outofdate(command)
# holds (parser, options, args) if the user script called parse_options()
# itself before main(); main() then reuses these instead of re-parsing
_parsed_options = None
# default usage message shown by --help (after "Usage: %prog ")
_usage = '[options] build script functions to run'
def parse_options(usage=_usage, extra_options=None, command_line=None):
    """ Parse command line options and return (parser, options, args).
        "usage" is the usage text shown by --help (minus the program name),
        "extra_options" is an optional list of options made with
        optparse.make_option(), and "command_line" optionally replaces
        sys.argv[1:]. The result is also saved in the module-level
        _parsed_options so a later main() call reuses these options. """
    # BUG FIX: without this "global", the assignment at the bottom bound a
    # function local, so main() never saw the saved options and the
    # "parse_options called earlier by user script" feature silently failed
    global _parsed_options
    parser = optparse.OptionParser(usage='Usage: %prog '+usage,
                                   version='%prog '+__version__)
    parser.disable_interspersed_args()
    parser.add_option('-t', '--time', action='store_true',
                      help='use file modification times instead of MD5 sums')
    parser.add_option('-d', '--dir', action='append',
                      help='add DIR to list of relevant directories')
    parser.add_option('-c', '--clean', action='store_true',
                      help='autoclean build outputs before running')
    parser.add_option('-q', '--quiet', action='store_true',
                      help="don't echo commands, only print errors")
    parser.add_option('-D', '--debug', action='store_true',
                      help="show debug info (why commands are rebuilt)")
    parser.add_option('-k', '--keep', action='store_true',
                      help='keep temporary strace output files')
    parser.add_option('-j', '--jobs', type='int',
                      help='maximum number of parallel jobs')
    if extra_options:
        # add any user-specified options passed in via main()
        for option in extra_options:
            parser.add_option(option)
    if command_line is not None:
        options, args = parser.parse_args(command_line)
    else:
        options, args = parser.parse_args()
    _parsed_options = (parser, options, args)
    return _parsed_options
def fabricate_version(min=None, max=None):
    """ If min is given, assert that the running fabricate is at least that
        version or exit with an error message. If max is given, assert that
        the running fabricate is at most that version. Return the current
        fabricate version string. This function was introduced in v1.14;
        for prior versions, the version string is available only as module
        local string fabricate.__version__ """
    if min is not None and float(__version__) < min:
        sys.stderr.write(("fabricate is version %s. This build script "
            "requires at least version %.2f") % (__version__, min))
        # BUG FIX: exit with a nonzero status so callers (shells, CI) can
        # detect the version-assertion failure; sys.exit() exited with 0
        sys.exit(1)
    if max is not None and float(__version__) > max:
        sys.stderr.write(("fabricate is version %s. This build script "
            "requires at most version %.2f") % (__version__, max))
        sys.exit(1)
    return __version__
def main(globals_dict=None, build_dir=None, extra_options=None, builder=None,
         default=None, jobs=1, command_line=None, **kwargs):
    """ Run the default function or the function(s) named in the command line
        arguments. Call this at the end of your build script. If one of the
        functions returns nonzero, main will exit with the last nonzero return
        value as its status code.

        "builder" is the class of builder to create, default (None) is the
        normal builder
        "command_line" is an optional list of command line arguments that can
        be used to prevent the default parsing of sys.argv. Used to intercept
        and modify the command line passed to the build script.
        "default" is the default user script function to call, None = 'build'
        "extra_options" is an optional list of options created with
        optparse.make_option(). The pseudo-global variable main.options
        is set to the parsed options list.
        "kwargs" is any other keyword arguments to pass to the builder """
    global default_builder, default_command, _pool
    # merge in any Builder keyword arguments saved by an earlier setup() call
    kwargs.update(_setup_kwargs)
    # reuse options if the user script already called parse_options() itself
    if _parsed_options is not None:
        parser, options, actions = _parsed_options
    else:
        parser, options, actions = parse_options(extra_options=extra_options, command_line=command_line)
    # translate command line options into Builder keyword arguments
    kwargs['quiet'] = options.quiet
    kwargs['debug'] = options.debug
    if options.time:
        kwargs['hasher'] = mtime_hasher
    if options.dir:
        kwargs['dirs'] = options.dir
    if options.keep:
        StraceRunner.keep_temps = options.keep
    # expose the parsed options as the pseudo-global main.options
    main.options = options
    if options.jobs is not None:
        jobs = options.jobs
    # work out which user-script function(s) to run: command line beats the
    # "default" argument, which beats whatever setup() recorded
    if default is not None:
        default_command = default
    if default_command is None:
        default_command = _setup_default
    if not actions:
        actions = [default_command]
    original_path = os.getcwd()
    # infer the build script's globals and directory from the caller's stack
    # frame when they were not passed in explicitly
    if None in [globals_dict, build_dir]:
        try:
            frame = sys._getframe(1)
        except:
            printerr("Your Python version doesn't support sys._getframe(1),")
            printerr("call main(globals(), build_dir) explicitly")
            sys.exit(1)
        if globals_dict is None:
            globals_dict = frame.f_globals
        if build_dir is None:
            build_file = frame.f_globals.get('__file__', None)
            if build_file:
                build_dir = os.path.dirname(build_file)
    if build_dir:
        # run from the build script's own directory, make-style
        if not options.quiet and os.path.abspath(build_dir) != original_path:
            print "Entering directory '%s'" % build_dir
        os.chdir(build_dir)
    # create the shared worker pool once, only when parallel jobs were asked for
    if _pool is None and jobs > 1:
        _pool = multiprocessing.Pool(jobs)
    # pick the Builder class: explicit argument beats setup(), beats default
    use_builder = Builder
    if _setup_builder is not None:
        use_builder = _setup_builder
    if builder is not None:
        use_builder = builder
    default_builder = use_builder(**kwargs)
    if options.clean:
        default_builder.autoclean()
    status = 0
    try:
        for action in actions:
            # bare names become calls, e.g. "build" -> "build()"
            if '(' not in action:
                action = action.strip() + '()'
            name = action.split('(')[0].split('.')[0]
            if name in globals_dict:
                # eval the action in the build script's namespace; actions
                # come from the user's own command line, which is trusted
                # to the same degree as the build script itself
                this_status = eval(action, globals_dict)
                if this_status:
                    status = int(this_status)
            else:
                printerr('%r command not defined!' % action)
                sys.exit(1)
        after() # wait till the build commands are finished
    except ExecutionError, exc:
        # Python 2 allows unpacking the exception's (message, data, status)
        message, data, status = exc
        printerr('fabricate: ' + message)
    finally:
        _stop_results.set() # stop the results gatherer so I don't hang
        # NOTE(review): if build_dir is still None here (no __file__ in the
        # caller's frame), os.path.abspath(build_dir) raises -- confirm that
        # callers always end up with a resolvable build_dir
        if not options.quiet and os.path.abspath(build_dir) != original_path:
            print "Leaving directory '%s' back to '%s'" % (build_dir, original_path)
        os.chdir(original_path)
    sys.exit(status)
if __name__ == '__main__':
    # when invoked as a script, emulate memoize.py: pass the remaining
    # command line to memoize() and exit with its status
    parser, options, args = parse_options('[options] command line to run')
    if args:
        status = memoize(args)
    elif options.clean:
        status = 0      # autoclean may have been used; nothing else to do
    else:
        # no command and no --clean: nothing to run, show usage and fail
        parser.print_help()
        status = 1
    sys.exit(status)
| 65,637 | 40.075094 | 115 | py |
GEL | GEL-master/src/PyGEL/pygel3d/graph.py | """ This module provides a Graph class and functionality for skeletonization using graphs. """
from pygel3d import hmesh, lib_py_gel, IntVector
import ctypes as ct
import numpy as np
class Graph:
""" This class is for representing graphs embedded in 3D. The class does not in
itself come with many features: it contains methods for creating, accessing, and
housekeeping. When vertices are used as parameters in the functions below, we usually
use the parameter name n (for node). n is simply an index (i.e. an integer) that
refers to a node (aka vertex)."""
def __init__(self,orig=None):
if orig == None:
self.obj = lib_py_gel.Graph_new()
else:
self.obj = lib_py_gel.Graph_copy(orig.obj)
def __del__(self):
lib_py_gel.Graph_delete(self.obj)
def clear(self):
""" Clear the graph. """
lib_py_gel.Graph_clear(self.obj)
def cleanup(self):
""" Cleanup reorders the graph nodes such that there is no
gap in the index range. """
lib_py_gel.Graph_cleanup(self.obj)
def nodes(self):
""" Get all nodes as an iterable range """
nodes = IntVector()
lib_py_gel.Graph_nodes(self.obj, nodes.obj)
return nodes
def neighbors(self, n, mode='n'):
""" Get the neighbors of node n. The final argument is either 'n' or 'e'. If it is 'n'
the function returns all neighboring nodes, and if it is 'e' it returns incident edges."""
nbors = IntVector()
lib_py_gel.Graph_neighbors(self.obj, n, nbors.obj, ct.c_char(mode.encode('ascii')))
return nbors
def positions(self):
""" Get the vertex positions by reference. You can assign to the
positions. """
pos = ct.POINTER(ct.c_double)()
n = lib_py_gel.Graph_positions(self.obj, ct.byref(pos))
return np.ctypeslib.as_array(pos,(n,3))
def average_edge_length(self):
""" Returns the average edge length. """
ael = lib_py_gel.Graph_average_edge_length(self.obj)
return ael
def add_node(self, p):
""" Adds node with position p to the graph and returns the
index of the new node. """
return lib_py_gel.Graph_add_node(self.obj, np.array(p))
def remove_node(self, n):
""" Removes the node n passed as argument. This does not change
any indices of other nodes, but n is then invalid. """
lib_py_gel.Graph_remove_node(self.obj, n)
def node_in_use(self, n):
""" Checks if n is in_use. This function returns false both
if n has been removed and if n is an index outside the range of
indices that are used. """
return lib_py_gel.Graph_node_in_use(self.obj, n)
def connect_nodes(self, n0, n1):
""" Creates a new edge connecting nodes n0 and n1. The index of
the new edge is returned. """
return lib_py_gel.Graph_connect_nodes(self.obj, n0, n1)
def disconnect_nodes(self, n0, n1):
""" Disconect nodes n0 and n1"""
lib_py_gel.Graph_disconnect_nodes(self.obj, n0, n1)
def merge_nodes(self, n0, n1, avg_pos):
""" Merge nodes n0 and n1. avg_pos indicates if you want the position to be the average. """
lib_py_gel.Graph_merge_nodes(self.obj, n0, n1, avg_pos)
def from_mesh(m):
""" Creates a graph from a mesh. The argument, m, is the input mesh,
and the function returns a graph with the same vertices and edges
as m."""
g = Graph()
lib_py_gel.graph_from_mesh(m.obj, g.obj)
return g
def load(fn):
""" Load a graph from a file. The argument, fn, is the filename which
is in a special format similar to Wavefront obj. The loaded graph is
returned by the function - or None if loading failed. """
s = ct.c_char_p(fn.encode('utf-8'))
g = Graph()
if lib_py_gel.graph_load(g.obj, s):
return g
return None
def save(fn, g):
""" Save graph to a file. The first argument, fn, is the file name,
and g is the graph. This function returns True if saving happened and
False otherwise. """
s = ct.c_char_p(fn.encode('utf-8'))
return lib_py_gel.graph_save(g.obj, s)
def to_mesh_cyl(g, fudge):
""" Creates a Manifold mesh from the graph. The first argument, g, is the
graph we want converted, and fudge is a constant that is used to increase the radius
of every node. This is useful if the radii are 0. """
m = hmesh.Manifold()
lib_py_gel.graph_to_mesh_cyl(g.obj, m.obj, fudge)
return m
def smooth(g, iter=1, alpha=1.0):
""" Simple Laplacian smoothing of a graph. The first argument is the Graph, g, iter
is the number of iterations, and alpha is the weight. If the weight is high,
each iteration causes a lot of smoothing, and a high number of iterations
ensures that the effect of smoothing diffuses throughout the graph, i.e. that the
effect is more global than local. """
lib_py_gel.graph_smooth(g.obj, iter, alpha)
def edge_contract(g, dist_thresh):
""" Simplifies a graph by contracting edges. The first argument, g, is the graph,
and only edges shorter than dist_thresh are contracted. When an edge is contracted
the merged vertices are moved to the average of their former positions. Thus,
the ordering in which contractions are carried out matters. Hence, edges are
contracted in the order of increasing length and edges are only considered if
neither end point is the result of a contraction, but the process is then repeated
until no more contractions are possible. Returns total number of contractions. """
return lib_py_gel.graph_edge_contract(g.obj, dist_thresh)
def prune(g):
""" Prune leaves of a graph. The graph, g, is passed as the argument. This function
removes leaf nodes (valency 1) whose only neighbour has valency > 2. In practice
such isolated leaves are frequently spurious if the graph is a skeleton. Does not
return a value. """
lib_py_gel.graph_prune(g.obj)
def LS_skeleton(g, sampling=True):
""" Skeletonize a graph using the local separators approach. The first argument,
g, is the graph, and, sampling indicates whether we try to use all vertices
(False) as starting points for finding separators or just a sampling (True).
The function returns a new graph which is the skeleton of the input graph. """
skel = Graph()
mapping = IntVector()
lib_py_gel.graph_LS_skeleton(g.obj, skel.obj, mapping.obj, sampling)
return skel
def LS_skeleton_and_map(g, sampling=True):
""" Skeletonize a graph using the local separators approach. The first argument,
g, is the graph, and, sampling indicates whether we try to use all vertices
(False) as starting points for finding separators or just a sampling (True).
The function returns a tuple containing a new graph which is the skeleton of
the input graph and a map from the graph nodes to the skeletal nodes. """
skel = Graph()
mapping = IntVector()
lib_py_gel.graph_LS_skeleton(g.obj, skel.obj, mapping.obj, sampling)
return skel, mapping
def front_skeleton_and_map(g, colors):
""" Skeletonize a graph using the front separators approach. The first argument,
g, is the graph, and, colors is a 2D array where each row contains a sequence
of floating point values - one for each node. We can have as many rows as needed
for the front separator computation. We can think of this as a coloring
of the nodes, hence the name. In practice, a coloring might just be the x-coordinate
of the nodes or some other function that indicates something about the structure of the
graph. The function returns a tuple containing a new graph which is the
skeleton of the input graph and a map from the graph nodes to the skeletal nodes. """
skel = Graph()
mapping = IntVector()
colors_flat = np.asarray(colors, dtype=np.float64)
N_col = colors_flat.shape[0]
lib_py_gel.graph_front_skeleton(g.obj, skel.obj, mapping.obj, N_col, colors_flat.ctypes.data_as(ct.POINTER(ct.c_double)))
return skel, mapping
| 8,162 | 48.174699 | 125 | py |
GEL | GEL-master/src/PyGEL/pygel3d/jupyter_display.py | """ This is a module with a function, display, that provides functionality for displaying a
Manifold or a Graph as an interactive 3D model in a Jupyter Notebook. """
from pygel3d import hmesh, graph
from numpy import array
import plotly.graph_objs as go
import plotly.offline as py
EXPORT_MODE = False
def set_export_mode(_exp_mode=True):
""" Calling this function will set export mode to true. It is necessary
to do so if we wish to export a notebook containing interactive
plotly graphics (made with display below) to HTML. In other words, this function
should not necessarily be called in normal usage but only when we export to HTML. It is
then called once in the beginning of the notebook. However, as a bit of a twist on
this story, it appears that if we don't call this function, any call to display must
be the last thing that happens in a cell. So, maybe it is best to always call
set_export_mode in the beginning of a notebook.
"""
global EXPORT_MODE
EXPORT_MODE=_exp_mode
if EXPORT_MODE:
py.init_notebook_mode(connected=False)
def display(m,wireframe=True,smooth=True,data=None):
""" The display function shows an interactive presentation of the Manifold, m, inside
a Jupyter Notebook. wireframe=True means that a wireframe view of the mesh is
superimposed on the 3D model. If smooth=True, the mesh is rendered with vertex
normals. Otherwise, the mesh is rendered with face normals. If data=None, the
mesh is shown in a light grey color. If data contains an array of scalar values
per vertex, these are mapped to colors used to color the mesh. Finally, note that
m can also be a Graph. In that case the display function just draws the edges as
black lines. """
mesh_data = []
if isinstance(m,hmesh.Manifold):
xyz = array([ p for p in m.positions()])
m_tri = hmesh.Manifold(m)
hmesh.triangulate(m_tri)
ijk = array([[ idx for idx in m_tri.circulate_face(f,'v')] for f in m_tri.faces()])
mesh = go.Mesh3d(x=xyz[:,0],y=xyz[:,1],z=xyz[:,2],
i=ijk[:,0],j=ijk[:,1],k=ijk[:,2],color='#dddddd',flatshading=not smooth)
if data is not None:
mesh['intensity'] = data
mesh_data += [mesh]
if wireframe:
pos = m.positions()
xyze = []
for h in m.halfedges():
if h < m.opposite_halfedge(h):
p0 = pos[m.incident_vertex(m.opposite_halfedge(h))]
p1 = pos[m.incident_vertex(h)]
xyze.append(array(p0))
xyze.append(array(p1))
xyze.append(array([None, None, None]))
xyze = array(xyze)
trace1=go.Scatter3d(x=xyze[:,0],y=xyze[:,1],z=xyze[:,2],
mode='lines',
line=dict(color='rgb(125,0,0)', width=1),
hoverinfo='none')
mesh_data += [trace1]
elif isinstance(m,graph.Graph):
pos = m.positions()
xyze = []
for v in m.nodes():
for w in m.neighbors(v):
if v < w:
p0 = pos[v]
p1 = pos[w]
xyze.append(array(p0))
xyze.append(array(p1))
xyze.append(array([None, None, None]))
xyze = array(xyze)
trace1=go.Scatter3d(x=xyze[:,0],y=xyze[:,1],z=xyze[:,2],
mode='lines',
line=dict(color='rgb(0,0,0)', width=1),
hoverinfo='none')
mesh_data += [trace1]
lyt = go.Layout(width=850,height=800)
lyt.scene.aspectmode="data"
if EXPORT_MODE:
py.iplot(dict(data=mesh_data,layout=lyt))
else:
return go.FigureWidget(mesh_data,lyt)
| 3,849 | 43.252874 | 91 | py |
GEL | GEL-master/src/PyGEL/pygel3d/hmesh.py | """ The hmesh module provides an halfedge based mesh representation."""
import ctypes as ct
import numpy as np
from numpy.linalg import norm
from pygel3d import lib_py_gel, IntVector, Vec3dVector, spatial
from scipy.sparse import csc_matrix, vstack
from scipy.sparse.linalg import lsqr
from collections import defaultdict
class Manifold:
""" The Manifold class represents a halfedge based mesh. It is maybe a bit grand to call
a mesh class Manifold, but meshes based on the halfedge representation are manifold (if we
ignore a few corner cases) unlike some other representations. This class contains a number of
methods for mesh manipulation and inspection. Note also that numerous further functions are
available to manipulate meshes stored as Manifolds.
Many of the functions below accept arguments called hid, fid, or vid. These are simply indices
of halfedges, faces and vertices, respectively: integer numbers that identify the corresponding
mesh element. Using a plain integer to identify a mesh entity means that, for instance, a
vertex index can also be used as an index into, say, a NumPy array without any conversion.
"""
def __init__(self,orig=None):
if orig == None:
self.obj = lib_py_gel.Manifold_new()
else:
self.obj = lib_py_gel.Manifold_copy(orig.obj)
@classmethod
def from_triangles(cls,vertices, faces):
""" Given a list of vertices and triangles (faces), this function produces
a Manifold mesh."""
m = cls()
m.obj = lib_py_gel.Manifold_from_triangles(len(vertices),len(faces),np.array(vertices,dtype=np.float64), np.array(faces,dtype=ct.c_int))
return m
@classmethod
def from_points(cls,pts,xaxis=np.array([1,0,0]),yaxis=np.array([0,1,0])):
""" This function computes the Delaunay triangulation of pts. You need
to specify xaxis and yaxis if they are not canonical. The function returns
a Manifold with the resulting triangles. Clearly, this function will
give surprising results if the surface represented by the points is not
well represented as a 2.5D surface, aka a height field. """
m = cls()
m.obj = lib_py_gel.Manifold_from_points(len(pts),np.array(pts,dtype=np.float64), np.array(xaxis,dtype=np.float64), np.array(yaxis,dtype=np.float64))
return m
def __del__(self):
lib_py_gel.Manifold_delete(self.obj)
def add_face(self,pts):
""" Add a face to the Manifold.
This function takes a list of 3D points, pts, as argument and creates a face
in the mesh with those points as vertices. The function returns the index
of the created face.
"""
return lib_py_gel.Manifold_add_face(self.obj, len(pts), np.array(pts))
def positions(self):
""" Retrieve an array containing the vertex positions of the Manifold.
It is not a copy: any changes are made to the actual vertex positions. """
pos = ct.POINTER(ct.c_double)()
n = lib_py_gel.Manifold_positions(self.obj, ct.byref(pos))
return np.ctypeslib.as_array(pos,(n,3))
def no_allocated_vertices(self):
""" Number of vertices.
This number could be higher than the number of actually
used vertices, but corresponds to the size of the array allocated
for vertices."""
return lib_py_gel.Manifold_no_allocated_vertices(self.obj)
def no_allocated_faces(self):
""" Number of faces.
This number could be higher than the number of actually
used faces, but corresponds to the size of the array allocated
for faces."""
return lib_py_gel.Manifold_no_allocated_faces(self.obj)
def no_allocated_halfedges(self):
""" Number of halfedges.
This number could be higher than the number of actually
used halfedges, but corresponds to the size of the array allocated
for halfedges."""
return lib_py_gel.Manifold_no_allocated_halfedges(self.obj)
def vertices(self):
""" Returns an iterable containing all vertex indices"""
verts = IntVector()
n = lib_py_gel.Manifold_vertices(self.obj, verts.obj)
return verts
def faces(self):
""" Returns an iterable containing all face indices"""
faces = IntVector()
n = lib_py_gel.Manifold_faces(self.obj, faces.obj)
return faces
def halfedges(self):
""" Returns an iterable containing all halfedge indices"""
hedges = IntVector()
n = lib_py_gel.Manifold_halfedges(self.obj, hedges.obj)
return hedges
def circulate_vertex(self, vid, mode='v'):
""" Circulate a vertex. Passed a vertex index, vid, and second argument,
mode='f', this function will return an iterable with all faces incident
on vid arranged in counter clockwise order. Similarly, if mode is 'h',
incident halfedges (outgoing) are returned, and for mode = 'v', all
neighboring vertices are returned. """
nbrs = IntVector()
n = lib_py_gel.Manifold_circulate_vertex(self.obj, vid, ct.c_char(mode.encode('ascii')), nbrs.obj)
return nbrs
def circulate_face(self, fid, mode='v'):
""" Circulate a face. Passed a face index, fid, and second argument,
mode='f', this function will return an iterable with all faces that
share an edge with fid (in counter clockwise order). If the argument is
mode='h', the halfedges themselves are returned. For mode='v', the
incident vertices of the face are returned. """
nbrs = IntVector()
n = lib_py_gel.Manifold_circulate_face(self.obj, fid, ct.c_char(mode.encode('ascii')), nbrs.obj)
return nbrs
def next_halfedge(self,hid):
""" Returns next halfedge to hid. """
return lib_py_gel.Walker_next_halfedge(self.obj, hid)
def prev_halfedge(self,hid):
""" Returns previous halfedge to hid. """
return lib_py_gel.Walker_prev_halfedge(self.obj, hid)
def opposite_halfedge(self,hid):
""" Returns opposite halfedge to hid. """
return lib_py_gel.Walker_opposite_halfedge(self.obj, hid)
def incident_face(self,hid):
""" Returns face corresponding to hid. """
return lib_py_gel.Walker_incident_face(self.obj, hid)
def incident_vertex(self,hid):
""" Returns vertex corresponding to (or pointed to by) hid. """
return lib_py_gel.Walker_incident_vertex(self.obj, hid)
def remove_vertex(self,vid):
""" Remove vertex vid from the Manifold. This function merges all faces
around the vertex into one and then removes this resulting face. """
return lib_py_gel.Manifold_remove_vertex(self.obj, vid)
def remove_face(self,fid):
""" Removes a face, fid, from the Manifold. If it is an interior face it is
simply replaced by an invalid index. If the face contains boundary
edges, these are removed. Situations may arise where the mesh is no
longer manifold because the situation at a boundary vertex is not
homeomorphic to a half disk. This, we can probably ignore since from the
data structure point of view it is not really a problem that a vertex is
incident on two holes - a hole can be seen as a special type of face.
The function returns false if the index of the face is not valid,
otherwise the function must complete. """
return lib_py_gel.Manifold_remove_face(self.obj, fid)
def remove_edge(self,hid):
""" Remove an edge, hid, from the Manifold. This function will remove the
faces on either side and the edge itself in the process. Thus, it is a
simple application of remove_face. """
return lib_py_gel.Manifold_remove_edge(self.obj, hid)
def vertex_in_use(self,vid):
""" check if vertex, vid, is in use. This function returns true if the id corresponds
to a vertex that is currently in the mesh and false otherwise. vid could
be outside the range of used ids and it could also correspond to a vertex
which is not active. The function returns false in both cases. """
return lib_py_gel.Manifold_vertex_in_use(self.obj, vid)
def face_in_use(self,fid):
""" check if face, fid, is in use. This function returns true if the id corresponds
to a face that is currently in the mesh and false otherwise. fid could
be outside the range of used ids and it could also correspond to a face
which is not active. The function returns false in both cases. """
return lib_py_gel.Manifold_face_in_use(self.obj, fid)
def halfedge_in_use(self,hid):
""" check if halfedge hid is in use. This function returns true if the id corresponds
to a halfedge that is currently in the mesh and false otherwise. hid could
be outside the range of used ids and it could also correspond to a halfedge
which is not active. The function returns false in both cases. """
return lib_py_gel.Manifold_halfedge_in_use(self.obj, hid)
def flip_edge(self,hid):
""" Flip the edge, hid, separating two faces. The function first verifies that
the edge is flippable. This entails making sure that all of the
following are true.
1. adjacent faces are triangles.
2. neither end point has valency three or less.
3. the vertices that will be connected are not already.
If the tests are passed, the flip is performed and the function
returns True. Otherwise False."""
return lib_py_gel.Manifold_flip_edge(self.obj,hid)
def collapse_edge(self,hid, avg_vertices=False):
""" Collapse an edge hid.
Before collapsing hid, a number of tests are made:
---
1. For the two vertices adjacent to the edge, we generate a list of all their neighbouring vertices.
We then generate a list of the vertices that occur in both these lists.
That is, we find all vertices connected by edges to both endpoints of the edge and store these in a list.
2. For both faces incident on the edge, check whether they are triangular.
If this is the case, the face will be removed, and it is ok that the the third vertex is connected to both endpoints.
Thus the third vertex in such a face is removed from the list generated in 1.
3. If the list is now empty, all is well.
Otherwise, there would be a vertex in the new mesh with two edges connecting it to the same vertex. Return false.
4. TETRAHEDRON TEST:
If the valency of both vertices is three, and the incident faces are triangles, we also disallow the operation.
Reason: A vertex valency of two and two triangles incident on the adjacent vertices makes the construction collapse.
5. VALENCY 4 TEST:
If a triangle is adjacent to the edge being collapsed, it disappears.
This means the valency of the remaining edge vertex is decreased by one.
A valency two vertex reduced to a valency one vertex is considered illegal.
6. PREVENT MERGING HOLES:
Collapsing an edge with boundary endpoints and valid faces results in the creation where two holes meet.
A non manifold situation. We could relax this...
7. New test: if the same face is in the one-ring of both vertices but not adjacent to the common edge,
then the result of a collapse would be a one ring where the same face occurs twice. This is disallowed as the resulting
face would be non-simple.
If the tests are passed, the collapse is performed and the function
returns True. Otherwise False."""
return lib_py_gel.Manifold_collapse_edge(self.obj, hid, avg_vertices)
def split_face_by_edge(self,fid,v0,v1):
""" Split a face. The face, fid, is split by creating an edge with
endpoints v0 and v1 (the next two arguments). The vertices of the old
face between v0 and v1 (in counter clockwise order) continue to belong
to fid. The vertices between v1 and v0 belong to the new face. A handle to
the new face is returned. """
return lib_py_gel.Manifold_split_face_by_edge(self.obj, fid, v0, v1)
def split_face_by_vertex(self,fid):
""" Split a polygon, fid, by inserting a vertex at the barycenter. This
function is less likely to create flipped triangles than the
split_face_triangulate function. On the other hand, it introduces more
vertices and probably makes the triangles more acute. The vertex id of the
inserted vertex is returned. """
return lib_py_gel.Manifold_split_face_by_vertex(self.obj,fid)
def split_edge(self,hid):
""" Insert a new vertex on halfedge hid. The new halfedge is insterted
as the previous edge to hid. The vertex id of the inserted vertex is returned. """
return lib_py_gel.Manifold_split_edge(self.obj,hid)
def stitch_boundary_edges(self,h0,h1):
""" Stitch two halfedges. Two boundary halfedges, h0 and h1, can be stitched
together. This can be used to build a complex mesh from a bunch of
simple faces. """
return lib_py_gel.Manifold_stitch_boundary_edges(self.obj, h0, h1)
def merge_faces(self,hid):
""" Merges two faces into a single polygon. The merged faces are those shared
by the edge for which hid is one of the two corresponding halfedges. This function returns
true if the merging was possible and false otherwise. Currently merge
only fails if the mesh is already illegal. Thus it should, in fact,
never fail. """
if self.is_halfedge_at_boundary(hid):
return False
fid = self.incident_face(hid)
return lib_py_gel.Manifold_merge_faces(self.obj, fid, hid)
def close_hole(self,hid):
""" Close hole given by hid (i.e. the face referenced by hid). Returns
index of the created face or the face that was already there if, in
fact, hid was not next to a hole. """
return lib_py_gel.Manifold_close_hole(self.obj, hid)
def cleanup(self):
""" Remove unused items from Mesh. This function remaps all vertices, halfedges
and faces such that the arrays do not contain any holes left by unused mesh
entities. It is a good idea to call this function when a mesh has been simplified
or changed in other ways such that mesh entities have been removed. However, note
that it invalidates any attributes that you might have stored in auxilliary arrays."""
lib_py_gel.Manifold_cleanup(self.obj)
def is_halfedge_at_boundary(self, hid):
""" Returns True if hid is a boundary halfedge, i.e. face on either
side is invalid. """
return lib_py_gel.is_halfedge_at_boundary(self.obj, hid)
def is_vertex_at_boundary(self, vid):
""" Returns True if vid lies on a boundary. """
return lib_py_gel.is_vertex_at_boundary(self.obj, vid)
def edge_length(self, hid):
""" Returns length of edge given by halfedge hid which is passed as argument. """
return lib_py_gel.length(self.obj, hid)
def valency(self,vid):
""" Returns valency of vid, i.e. number of incident edges."""
return lib_py_gel.valency(self.obj,vid)
def vertex_normal(self, vid):
""" Returns vertex normal (angle weighted) of vertex given by vid """
n = (ct.c_double*3)()
lib_py_gel.vertex_normal(self.obj, vid, ct.byref(n))
return np.array([n[0],n[1],n[2]])
def connected(self, v0, v1):
""" Returns true if the two argument vertices, v0 and v1, are in each other's one-rings."""
return lib_py_gel.connected(self.obj,v0,v1)
def no_edges(self, fid):
""" Compute the number of edges of a face fid """
return lib_py_gel.no_edges(self.obj, fid)
def face_normal(self, fid):
""" Compute the normal of a face fid. If the face is not a triangle,
the normal is not defined, but computed using the first three
vertices of the face. """
n = (ct.c_double*3)()
lib_py_gel.face_normal(self.obj, fid, ct.byref(n))
return np.array([n[0],n[1],n[2]])
def area(self, fid):
""" Returns the area of a face fid. """
return lib_py_gel.area(self.obj, fid)
def perimeter(self, fid):
""" Returns the perimeter of a face fid. """
return lib_py_gel.perimeter(self.obj, fid)
def centre(self, fid):
""" Returns the centre of a face. """
v = (ct.c_double*3)()
lib_py_gel.centre(self.obj, fid, ct.byref(v))
return v
def valid(m):
    """ Run a battery of sanity checks on Manifold m and return True if it
    appears to be a valid manifold, False otherwise. The checks are
    heuristic rather than rigorous, but have caught all problems so far. """
    return lib_py_gel.valid(m.obj)
def closed(m):
    """ True when Manifold m is closed, i.e. has no boundary edges. """
    is_closed = lib_py_gel.closed(m.obj)
    return is_closed
def bbox(m):
    """ Axis aligned bounding box of Manifold m. Returns a pair of NumPy
    arrays: (minimum corner, maximum corner). """
    lo = (ct.c_double*3)()
    hi = (ct.c_double*3)()
    lib_py_gel.bbox(m.obj, ct.byref(lo),ct.byref(hi))
    return (np.ctypeslib.as_array(lo,3),np.ctypeslib.as_array(hi,3))
def bsphere(m):
    """ Bounding sphere of Manifold m. Returns (centre, radius) where centre
    is a ctypes double[3] buffer and radius a ctypes c_double (use .value to
    obtain the Python float). """
    centre_buf = (ct.c_double*3)()
    radius_buf = (ct.c_double)()
    lib_py_gel.bsphere(m.obj,ct.byref(centre_buf),ct.byref(radius_buf))
    return (centre_buf,radius_buf)
def stitch(m, rad=1e-30):
    """ Stitch together edges of m whose end points coincide geometrically
    (within distance rad). This lets you create a mesh as a bag of faces and
    then weld them into a coherent whole; a spatial data structure is used
    internally to find coincident vertices. Returns the number of edges that
    could not be stitched -- often because stitching them would have created
    a non-manifold configuration. """
    unstitched = lib_py_gel.stitch_mesh(m.obj,rad)
    return unstitched
def obj_save(fn, m):
    """ Save Manifold m to the Wavefront OBJ file named fn. """
    fname = ct.c_char_p(fn.encode('utf-8'))
    lib_py_gel.obj_save(fname, m.obj)
def off_save(fn, m):
    """ Save Manifold m to the OFF file named fn. """
    fname = ct.c_char_p(fn.encode('utf-8'))
    lib_py_gel.off_save(fname, m.obj)
def x3d_save(fn, m):
    """ Save Manifold m to the X3D file named fn. """
    fname = ct.c_char_p(fn.encode('utf-8'))
    lib_py_gel.x3d_save(fname, m.obj)
def obj_load(fn):
    """ Load a Manifold from the Wavefront OBJ file named fn.
    Returns the Manifold on success, None on failure. """
    mesh = Manifold()
    fname = ct.c_char_p(fn.encode('utf-8'))
    return mesh if lib_py_gel.obj_load(fname, mesh.obj) else None
def off_load(fn):
    """ Load a Manifold from the OFF file named fn.
    Returns the Manifold on success, None on failure. """
    mesh = Manifold()
    fname = ct.c_char_p(fn.encode('utf-8'))
    return mesh if lib_py_gel.off_load(fname, mesh.obj) else None
def ply_load(fn):
    """ Load a Manifold from the Stanford PLY file named fn.
    Returns the Manifold on success, None on failure. """
    mesh = Manifold()
    fname = ct.c_char_p(fn.encode('utf-8'))
    return mesh if lib_py_gel.ply_load(fname, mesh.obj) else None
def x3d_load(fn):
    """ Load a Manifold from the X3D file named fn.
    Returns the Manifold on success, None on failure. """
    mesh = Manifold()
    fname = ct.c_char_p(fn.encode('utf-8'))
    return mesh if lib_py_gel.x3d_load(fname, mesh.obj) else None
from os.path import splitext
def load(fn):
    """ Load a Manifold from an X3D/OBJ/OFF/PLY file, dispatching on the
    file extension of fn. Returns the loaded Manifold, or None if loading
    failed or the extension is not recognized. """
    ext = splitext(fn)[1].lower()
    if ext == ".x3d":
        return x3d_load(fn)
    elif ext == ".obj":
        return obj_load(fn)
    elif ext == ".off":
        return off_load(fn)
    elif ext == ".ply":
        return ply_load(fn)
    return None
def save(fn, m):
    """ Save Manifold m to an X3D/OBJ/OFF file, dispatching on the file
    extension of fn. An unrecognized extension silently saves nothing. """
    ext = splitext(fn)[1].lower()
    if ext == ".x3d":
        x3d_save(fn, m)
    elif ext == ".obj":
        obj_save(fn, m)
    elif ext == ".off":
        off_save(fn, m)
def remove_caps(m, thresh=2.9):
    """ Remove caps from the triangle mesh m. A cap is a triangle with two
    very small angles and one angle close to pi; it does not necessarily
    have a very short edge. thresh is the angle threshold: values closer to
    pi make the removal _less_ sensitive. A cap is removed by flipping the
    long edge opposite the vertex with the large angle -- though the actual
    routine is more involved; read the underlying code and document it more
    carefully before relying on details. """
    lib_py_gel.remove_caps(m.obj,thresh)
def remove_needles(m, thresh=0.05, average_positions=False):
    """ Remove needles from the triangle mesh m. A needle is a triangle with
    a single very short edge, and it is removed by collapsing that edge.
    thresh is the length threshold as a fraction of the mesh's average edge
    length. If average_positions is True, the surviving vertex is placed at
    the average of the collapsed edge's end points. """
    abs_thresh = thresh * average_edge_length(m)
    lib_py_gel.remove_needles(m.obj,abs_thresh, average_positions)
def close_holes(m, max_size=100):
    """ Replace holes in m by faces: find all loops of edges next to missing
    faces and fill them. max_size presumably bounds the size of holes that
    get closed -- confirm against the C++ implementation. """
    lib_py_gel.close_holes(m.obj, max_size)
def flip_orientation(m):
    """ Flip the orientation of mesh m: afterwards all normals point the
    opposite way and clockwise becomes counter-clockwise. """
    lib_py_gel.flip_orientation(m.obj)
def merge_coincident_boundary_vertices(m, rad = 1.0e-30):
    """ Merge boundary vertices of m that geometrically coincide (within
    rad). Two vertices are not merged if one belongs to the other's one-ring
    or if their one-rings share a vertex. """
    lib_py_gel.merge_coincident_boundary_vertices(m.obj, rad)
def minimize_curvature(m,anneal=False):
    """ Minimize the mean curvature of m purely by flipping edges -- no
    vertex is moved. Equivalent to dihedral angle minimization except that
    angles are weighted by edge length. anneal enables simulated annealing. """
    lib_py_gel.minimize_curvature(m.obj, anneal)
def minimize_dihedral_angle(m,max_iter=10000, anneal=False, alpha=False, gamma=4.0):
    """ Minimize dihedral angles in m by flipping edges.
    max_iter -- maximum number of simulated annealing iterations.
    anneal   -- whether to apply simulated annealing.
    alpha    -- False means the cosine of each angle is used instead of the
                true angle (faster).
    gamma    -- power to which the angles are raised. """
    lib_py_gel.minimize_dihedral_angle(m.obj, max_iter, anneal,alpha,ct.c_double(gamma))
def maximize_min_angle(m,dihedral_thresh=0.95,anneal=False):
    """ Maximize the minimum angle of the triangles of m by edge flipping,
    making the triangulation more Delaunay. """
    lib_py_gel.maximize_min_angle(m.obj,dihedral_thresh,anneal)
def optimize_valency(m,anneal=False):
    """ Flip edges of m trying to reach valency 6 for interior vertices and
    valency 4 along the boundary. """
    lib_py_gel.optimize_valency(m.obj, anneal)
def randomize_mesh(m,max_iter=1):
    """ Perform random edge flips in m. Useful for generating synthetic test
    cases. """
    lib_py_gel.randomize_mesh(m.obj, max_iter)
def quadric_simplify(m,keep_fraction,singular_thresh=1e-4,optimal_positions=True):
    """ Garland-Heckbert quadric simplification of mesh m (own
    implementation).
    keep_fraction     -- fraction of the vertices to retain.
    singular_thresh   -- smallest SVD singular value accepted, relative to
                         the greatest singular value.
    optimal_positions -- if True vertices are repositioned optimally;
                         otherwise the result's vertices are a subset of the
                         originals. """
    lib_py_gel.quadric_simplify(m.obj, keep_fraction, singular_thresh,optimal_positions)
def average_edge_length(m,max_iter=1):
    """ Average edge length of mesh m. The max_iter argument is ignored and
    kept only so existing callers do not break. """
    return lib_py_gel.average_edge_length(m.obj)
def median_edge_length(m,max_iter=1):
    """ Median edge length of mesh m. The max_iter argument is ignored and
    kept only so existing callers do not break. """
    return lib_py_gel.median_edge_length(m.obj)
def refine_edges(m,threshold):
    """ Split every edge of m longer than threshold. Each split produces a
    new vertex of valency two. Returns the value reported by the native
    routine (declared to return an int). """
    num = lib_py_gel.refine_edges(m.obj, threshold)
    return num
def cc_split(m):
    """ Catmull-Clark split of m: each face is divided into new quadrilateral
    faces formed by connecting every corner with a point on each incident
    edge and a point at the face centre. Purely topological; follow with
    cc_smooth to complete a Catmull-Clark subdivision step. """
    lib_py_gel.cc_split(m.obj)
def loop_split(m):
    """ Loop split of m: every edge is divided into two segments, creating
    four new triangles per original triangle. """
    lib_py_gel.loop_split(m.obj)
def root3_subdivide(m):
    """ Apply Leif Kobbelt's sqrt(3) subdivision scheme to m: a vertex is
    placed in the centre of each face and all old edges are flipped. """
    lib_py_gel.root3_subdivide(m.obj)
def rootCC_subdivide(m):
    """ Subdivision scheme that creates a vertex inside each original (quad)
    face of m, producing four triangles, then merges triangles sharing an
    old edge. Two applications give something similar to Catmull-Clark. """
    lib_py_gel.rootCC_subdivide(m.obj)
def butterfly_subdivide(m):
    """ Butterfly subdivision of m: an interpolatory scheme producing the
    same connectivity as Loop subdivision. """
    lib_py_gel.butterfly_subdivide(m.obj)
def cc_smooth(m):
    """ Smoothing pass that, when called after cc_split, completes a step of
    Catmull-Clark subdivision of m. """
    lib_py_gel.cc_smooth(m.obj)
def loop_smooth(m):
    """ Smoothing pass that, when called after loop_split, completes a step
    of Loop subdivision of m. """
    lib_py_gel.loop_smooth(m.obj)
def taubin_smooth(m, iter=1):
    """ Apply iter iterations of Taubin smoothing to mesh m. """
    lib_py_gel.taubin_smooth(m.obj, iter)
def laplacian_smooth(m, w=0.5, iter=1):
    """ Apply iter iterations of Laplacian smoothing with weight w to
    mesh m. """
    lib_py_gel.laplacian_smooth(m.obj, w, iter)
def volumetric_isocontouring(data, dims, bbox_min = None, bbox_max = None,
                             tau=0.0, make_triangles=True, high_is_inside=True):
    """ Create a polygonal mesh by dual contouring of the volumetric data.
    dims gives the grid dimensions; bbox_min (default [0,0,0]) and bbox_max
    (default dims) are the corners of the bounding box in R^3 covered by the
    grid; tau is the iso value (default 0). If make_triangles is True
    (default) the quads are turned into triangles. high_is_inside=True
    (default) means values greater than tau are interior, smaller values
    exterior. Returns the resulting Manifold. """
    if bbox_min is None:
        bbox_min = [0,0,0]
    if bbox_max is None:
        bbox_max = dims
    mesh = Manifold()
    # Flatten in Fortran order and convert to the exact C types the native
    # routine expects.
    grid = np.asarray(data.flatten(order='F'), dtype=ct.c_float)
    lo = np.asarray(bbox_min, dtype=np.float64, order='C')
    hi = np.asarray(bbox_max, dtype=np.float64, order='C')
    lib_py_gel.volumetric_isocontouring(mesh.obj, dims[0], dims[1], dims[2],
                                        grid.ctypes.data_as(ct.POINTER(ct.c_float)),
                                        lo.ctypes.data_as(ct.POINTER(ct.c_double)),
                                        hi.ctypes.data_as(ct.POINTER(ct.c_double)), tau,
                                        make_triangles, high_is_inside)
    return mesh
def triangulate(m, clip_ear=True):
    """ Turn the general polygonal mesh m into a triangle mesh by repeatedly
    splitting polygons into smaller ones -- via ear clipping when clip_ear
    is True, otherwise via shortest-edge splits. """
    if clip_ear:
        lib_py_gel.ear_clip_triangulate(m.obj)
    else:
        lib_py_gel.shortest_edge_triangulate(m.obj)
def skeleton_to_feq(g, node_radii = None):
    """ Turn the skeleton graph g into a Face Extrusion Quad mesh using
    node_radii as the per-node radii. If node_radii is None, every radius
    defaults to 0. Returns the resulting Manifold. """
    if node_radii is None:
        node_radii = [0]*len(g.nodes())
    radii = np.asarray(node_radii, dtype=np.float64)
    mesh = Manifold()
    lib_py_gel.graph_to_feq(g.obj, mesh.obj, radii.ctypes.data_as(ct.POINTER(ct.c_double)))
    return mesh
def laplacian_matrix(m):
    """ Returns the sparse uniform Laplacian matrix for a polygonal mesh m.
    Row i has 1.0 on the diagonal and -1/deg(i) for each neighbour of
    vertex i. The matrix is assembled directly in coordinate (COO) form and
    converted to CSC, avoiding the dense num_verts x num_verts intermediate
    array the previous implementation allocated (quadratic memory). """
    num_verts = m.no_allocated_vertices()
    rows, cols, vals = [], [], []
    for i in m.vertices():
        nb_verts = m.circulate_vertex(i)
        deg = len(nb_verts)
        # Diagonal entry.
        rows.append(i)
        cols.append(i)
        vals.append(1.0)
        # Off-diagonal entries; loop is empty when deg == 0, so no division
        # by zero (matching the old dense code).
        for nb in nb_verts:
            rows.append(i)
            cols.append(nb)
            vals.append(-1/deg)
    # NOTE(review): assumes circulate_vertex yields each neighbour once --
    # csc_matrix would sum duplicate (row, col) entries.
    return csc_matrix((vals, (rows, cols)), shape=(num_verts, num_verts))
def inv_correspondence_leqs(m, ref_mesh, dist_obj):
    """ Build the least-squares equations tying vertices of the skeletal mesh
    m to candidate correspondence points on the reference mesh ref_mesh.
    dist_obj must be a MeshDistance built from ref_mesh. Returns (A, b):
    A is a sparse matrix with one row per accepted correspondence (columns
    indexed by vertex id of m) and b holds the weighted target positions.
    Changes vs. the previous version: dead locals removed (key, cp_add_flag,
    m_tree_index_set and the never-read max_* trackers) and A is assembled
    sparsely instead of densifying an n_rows x num_verts array. """
    v_pos = m.positions()
    num_verts = m.no_allocated_vertices()
    control_points = []
    close_pts = []
    weights = []
    inv_close_points = defaultdict(list)
    # Spatial index over the vertices of m, so each reference vertex can find
    # its closest skeletal-mesh vertex.
    m_tree = spatial.I3DTree()
    for v in m.vertices():
        m_tree.insert(v_pos[v], v)
    m_tree.build()
    # For each reference vertex, register it as a candidate on its closest
    # vertex of m together with the cosine between the reference normal and
    # the direction towards that vertex.
    for v_id in ref_mesh.vertices():
        closest_pt_obj = m_tree.closest_point(ref_mesh.positions()[v_id], 1000)
        if closest_pt_obj is not None:
            index = closest_pt_obj[1]
            normal = ref_mesh.vertex_normal(v_id)
            dir_vec = v_pos[index] - ref_mesh.positions()[v_id]
            denom = np.linalg.norm(dir_vec, 2) * np.linalg.norm(normal, 2)
            if denom == 0:
                continue  # degenerate direction or normal: skip candidate
            dot_val = np.dot(dir_vec, normal) / denom
            inv_close_points[index].append((v_id, dot_val))
    # Filter candidates: reject a correspondence when the two dot products
    # disagree with the side (inside/outside) of ref_mesh on which the
    # vertex of m lies, as given by the signed distance.
    for i in m.vertices():
        curr_v = v_pos[i]
        normal = m.vertex_normal(i)
        if i in inv_close_points:
            curr_dist = dist_obj.signed_distance(curr_v)
            for cp in inv_close_points[i]:
                cp_pt = ref_mesh.positions()[cp[0]]
                inv_dot_val = np.dot(normal, cp_pt - curr_v) / np.linalg.norm(cp_pt - curr_v, 2)
                if (cp[1] > 0 or inv_dot_val < 0) and curr_dist < 0:
                    continue
                if (cp[1] < 0 or inv_dot_val > 0) and curr_dist > 0:
                    continue
                control_points.append(i)
                close_pts.append(cp_pt)
                weights.append(abs(cp[1]))
    # Assemble the weighted system directly in coordinate (COO) form. Row
    # indices are all distinct, so no duplicate entries are summed.
    weight = 1.0  # global scale retained from the original formulation
    n_rows = len(control_points)
    row_idx = np.arange(n_rows)
    vals = [weight * w for w in weights]
    b = np.zeros((n_rows, 3))
    for r in range(n_rows):
        b[r, :] = vals[r] * np.asarray(close_pts[r])
    A = csc_matrix((vals, (row_idx, control_points)), shape=(n_rows, num_verts))
    return A, b
def fit_mesh_to_ref(m, ref_mesh, local_iter = 50, dist_wt = 0.25, lap_wt = 1.0):
    """ Fit the skeletal mesh m to the reference mesh ref_mesh by repeatedly
    solving a least-squares system combining Laplacian smoothness equations
    with correspondence (distance) equations. m is modified in place and
    returned.
    local_iter -- number of fitting iterations.
    dist_wt    -- weight of the correspondence terms; decays by 0.001 per
                  iteration.
    lap_wt     -- weight of the Laplacian smoothness terms.
    Changes vs. the previous version: unused ref_pos local and redundant
    max_iter alias removed, and the lsqr solution is extracted with [0]
    instead of a 4-way unpack of a slice. """
    v_pos = m.positions()
    lap_matrix = laplacian_matrix(m)
    dist_obj = MeshDistance(ref_mesh)
    for _ in range(local_iter):
        dist_wt -= 0.001
        A, b = inv_correspondence_leqs(m, ref_mesh, dist_obj)
        # Stack the smoothness equations (zero right-hand side) on top of
        # the weighted correspondence equations.
        final_A = vstack([lap_wt*lap_matrix, dist_wt*A])
        b_add = np.zeros((final_A.shape[0] - b.shape[0], 3))
        final_b = np.vstack([b_add, dist_wt*b])
        # Solve the three coordinate systems independently.
        opt_x = lsqr(final_A, final_b[:, 0])[0]
        opt_y = lsqr(final_A, final_b[:, 1])[0]
        opt_z = lsqr(final_A, final_b[:, 2])[0]
        v_pos[:, 0] = opt_x
        v_pos[:, 1] = opt_y
        v_pos[:, 2] = opt_z
    return m
class MeshDistance:
    """ Distance queries against a triangulated Manifold. An instance wraps
    a native structure built from the mesh given to the constructor;
    signed_distance and ray_inside_test then answer queries for arbitrary
    points in space. """
    def __init__(self,m):
        self.obj = lib_py_gel.MeshDistance_new(m.obj)
    def __del__(self):
        lib_py_gel.MeshDistance_delete(self.obj)
    def signed_distance(self,pts,upper=1e30):
        """ Signed distance from each point in pts to the stored mesh. pts
        is anything convertible to an (N,3) array of 3D points (N >= 1).
        Returns an array of N distances: positive for points outside the
        mesh, negative for points inside. upper thresholds how far away the
        distance is of interest. """
        query = np.reshape(np.array(pts,dtype=ct.c_float), (-1,3))
        count = query.shape[0]
        dist = np.ndarray(count, dtype=ct.c_float)
        lib_py_gel.MeshDistance_signed_distance(self.obj, count,
                                                query.ctypes.data_as(ct.POINTER(ct.c_float)),
                                                dist.ctypes.data_as(ct.POINTER(ct.c_float)),
                                                upper)
        return dist
    def ray_inside_test(self,pts,no_rays=3):
        """ Classify each point in pts as inside (1) or outside (0) the
        stored mesh by casting no_rays rays -- effectively the sign of the
        distance, but casting (multiple) rays is sometimes more robust than
        the locally computed sign. pts is anything convertible to an (N,3)
        array; returns an array of N ints. """
        query = np.reshape(np.array(pts,dtype=ct.c_float), (-1,3))
        count = query.shape[0]
        flags = np.ndarray(count, dtype=ct.c_int)
        lib_py_gel.MeshDistance_ray_inside_test(self.obj, count,
                                                query.ctypes.data_as(ct.POINTER(ct.c_float)),
                                                flags.ctypes.data_as(ct.POINTER(ct.c_int)),
                                                no_rays)
        return flags
| 34,729 | 47.505587 | 166 | py |
GEL | GEL-master/src/PyGEL/pygel3d/__init__.py | """ PyGEL is a collection of classes and functions that expose features in the
GEL library. The primary purpose of PyGEL (and GEL) is to be useful for geometry
processing tasks. Especially tasks that involve 3D polygonal meshes, but there is
also a graph component useful e.g. for skeletonization. The PyGEL package is called
pygel3d and it contains five modules:
hmesh provides Manifold which is a class that represents polygonal meshes using the
halfedge representation. hmesh also provides a slew of functions for manipulating
polygonal meshes and the MeshDistance class which makes it simple to compute the
distance to a triangle mesh.
graph contains the Graph class which is used for graphs: i.e. collections of
vertices (in 3D) connected by edges. Unlike a Manifold, a Graph does not have to
represent a surface. There are also some associated functions which may be useful:
in particular, there is the LS_skeletonize function which computes a curve skeleton
from a Graph and returns the result as a new Graph.
gl_display provides the Viewer class which makes it simple to visualize meshes and
graphs.
jupyter_display makes it easy to use PyGEL in the context of a Jupyter Notebook.
This module contains a function that allows you to create a widget for interactively
visualizing a mesh or a graph in a Notebook. The feature is based on the Plotly
library and it is possible to export the resulting notebooks to HTML while preserving
the interactive 3D graphics in the notebook.
spatial contains the I3DTree class which is simply a kD-tree specialized for mapping
3D points to integers - typically indices. Of course, scipy.spatial has a more
generic class, so this is perhaps not the most important part of PyGEL.
"""
__all__ = ["hmesh", "graph", "gl_display", "jupyter_display", "spatial"]
import ctypes as ct
import numpy as np
import os
from sys import platform,prefix
def _get_script_path():
return os.path.dirname(__file__)
def _get_lib_name():
if platform == "darwin":
return "libPyGEL.dylib"
if platform == "win32":
return "PyGEL.dll"
return "libPyGEL.so"
# Load PyGEL the Python GEL bridge library
lib_py_gel = ct.cdll.LoadLibrary(_get_script_path() + "/" + _get_lib_name())
# An InvalidIndex is just a special integer value.
InvalidIndex = ct.c_size_t.in_dll(lib_py_gel, "InvalidIndex").value
# The following many lines make explicit the argument and return types of the
# C API so that ctypes marshals values correctly (without a declared restype,
# ctypes assumes functions return a C int).
# IntVector: C++-owned growable vector of size_t (see the IntVector class below)
lib_py_gel.IntVector_new.restype = ct.c_void_p
lib_py_gel.IntVector_get.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.IntVector_size.argtypes = (ct.c_void_p,)
lib_py_gel.IntVector_size.restype = ct.c_size_t
lib_py_gel.IntVector_delete.argtypes = (ct.c_void_p,)
# Vec3dVector: C++-owned vector of 3D double vectors (see Vec3dVector below)
lib_py_gel.Vec3dVector_new.restype = ct.c_void_p
lib_py_gel.Vec3dVector_get.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Vec3dVector_get.restype = ct.POINTER(ct.c_double)
lib_py_gel.Vec3dVector_size.argtypes = (ct.c_void_p,)
lib_py_gel.Vec3dVector_size.restype = ct.c_size_t
lib_py_gel.Vec3dVector_delete.argtypes = (ct.c_void_p,)
# I3DTree: kD-tree mapping 3D points to integers (wrapped in pygel3d.spatial)
lib_py_gel.I3DTree_new.restype = ct.c_void_p
lib_py_gel.I3DTree_delete.argtypes = (ct.c_void_p,)
lib_py_gel.I3DTree_insert.argtypes = (ct.c_void_p, ct.c_double, ct.c_double, ct.c_double, ct.c_size_t)
lib_py_gel.I3DTree_build.argtypes = (ct.c_void_p,)
lib_py_gel.I3DTree_closest_point.argtypes = (ct.c_void_p, ct.c_double, ct.c_double, ct.c_double, ct.c_double, ct.POINTER(ct.c_double*3), ct.POINTER(ct.c_size_t))
lib_py_gel.I3DTree_in_sphere.argtypes = (ct.c_void_p, ct.c_double, ct.c_double, ct.c_double, ct.c_double, ct.c_void_p,ct.c_void_p)
# Manifold class: signatures for construction, element queries, and the
# topology-editing operations wrapped by the Manifold class in hmesh.
lib_py_gel.Manifold_from_triangles.argtypes = (ct.c_size_t,ct.c_size_t, np.ctypeslib.ndpointer(ct.c_double), np.ctypeslib.ndpointer(ct.c_int))
lib_py_gel.Manifold_from_triangles.restype = ct.c_void_p
lib_py_gel.Manifold_from_points.argtypes = (ct.c_size_t,np.ctypeslib.ndpointer(ct.c_double), np.ctypeslib.ndpointer(ct.c_double),np.ctypeslib.ndpointer(ct.c_double))
lib_py_gel.Manifold_from_points.restype = ct.c_void_p
lib_py_gel.Manifold_new.restype = ct.c_void_p
lib_py_gel.Manifold_copy.restype = ct.c_void_p
lib_py_gel.Manifold_copy.argtypes = (ct.c_void_p,)
lib_py_gel.Manifold_delete.argtypes = (ct.c_void_p,)
lib_py_gel.Manifold_positions.restype = ct.c_size_t
lib_py_gel.Manifold_positions.argtypes = (ct.c_void_p, ct.POINTER(ct.POINTER(ct.c_double)))
lib_py_gel.Manifold_no_allocated_vertices.restype = ct.c_size_t
lib_py_gel.Manifold_no_allocated_vertices.argtypes = (ct.c_void_p,)
lib_py_gel.Manifold_no_allocated_faces.restype = ct.c_size_t
lib_py_gel.Manifold_no_allocated_faces.argtypes = (ct.c_void_p,)
lib_py_gel.Manifold_no_allocated_halfedges.restype = ct.c_size_t
lib_py_gel.Manifold_no_allocated_halfedges.argtypes = (ct.c_void_p,)
lib_py_gel.Manifold_vertices.restype = ct.c_size_t
lib_py_gel.Manifold_vertices.argtypes = (ct.c_void_p, ct.c_void_p)
lib_py_gel.Manifold_faces.restype = ct.c_size_t
lib_py_gel.Manifold_faces.argtypes = (ct.c_void_p, ct.c_void_p)
lib_py_gel.Manifold_halfedges.restype = ct.c_size_t
lib_py_gel.Manifold_halfedges.argtypes = (ct.c_void_p,ct.c_void_p)
lib_py_gel.Manifold_circulate_vertex.restype = ct.c_size_t
lib_py_gel.Manifold_circulate_vertex.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_char, ct.c_void_p)
lib_py_gel.Manifold_circulate_face.restype = ct.c_size_t
lib_py_gel.Manifold_circulate_face.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_char, ct.c_void_p)
lib_py_gel.Manifold_add_face.argtypes = (ct.c_void_p, ct.c_size_t, np.ctypeslib.ndpointer(ct.c_double))
lib_py_gel.Manifold_remove_face.restype = ct.c_bool
lib_py_gel.Manifold_remove_face.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_remove_edge.restype = ct.c_bool
lib_py_gel.Manifold_remove_edge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_remove_vertex.restype = ct.c_bool
lib_py_gel.Manifold_remove_vertex.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_vertex_in_use.restype = ct.c_bool
lib_py_gel.Manifold_vertex_in_use.argtypes = (ct.c_void_p,ct.c_size_t)
lib_py_gel.Manifold_face_in_use.restype = ct.c_bool
lib_py_gel.Manifold_face_in_use.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_halfedge_in_use.restype = ct.c_bool
lib_py_gel.Manifold_halfedge_in_use.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_flip_edge.restype = ct.c_bool
lib_py_gel.Manifold_flip_edge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_collapse_edge.restype = ct.c_bool
lib_py_gel.Manifold_collapse_edge.argtypes = (ct.c_void_p,ct.c_size_t,ct.c_bool)
lib_py_gel.Manifold_split_face_by_edge.restype = ct.c_size_t
lib_py_gel.Manifold_split_face_by_edge.argtypes = (ct.c_void_p, ct.c_size_t,ct.c_size_t,ct.c_size_t)
lib_py_gel.Manifold_split_face_by_vertex.restype = ct.c_size_t
lib_py_gel.Manifold_split_face_by_vertex.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_split_edge.restype = ct.c_size_t
lib_py_gel.Manifold_split_edge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Manifold_stitch_boundary_edges.restype = ct.c_bool
lib_py_gel.Manifold_stitch_boundary_edges.argtypes = (ct.c_void_p, ct.c_size_t,ct.c_size_t)
lib_py_gel.Manifold_merge_faces.restype = ct.c_bool
lib_py_gel.Manifold_merge_faces.argtypes = (ct.c_void_p, ct.c_size_t,ct.c_size_t)
lib_py_gel.Manifold_close_hole.argtypes = (ct.c_void_p,ct.c_size_t)
lib_py_gel.Manifold_cleanup.argtypes = (ct.c_void_p,)
# Walker is a helper class assisting us in navigating a mesh.
# Not directly exposed in PyGEL3D; used internally for halfedge traversal.
lib_py_gel.Walker_next_halfedge.restype = ct.c_size_t
lib_py_gel.Walker_next_halfedge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Walker_prev_halfedge.restype = ct.c_size_t
lib_py_gel.Walker_prev_halfedge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Walker_opposite_halfedge.restype = ct.c_size_t
lib_py_gel.Walker_opposite_halfedge.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Walker_incident_face.restype = ct.c_size_t
lib_py_gel.Walker_incident_face.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Walker_incident_vertex.restype = ct.c_size_t
lib_py_gel.Walker_incident_vertex.argtypes = (ct.c_void_p, ct.c_size_t)
# A list of helper functions: queries, I/O, and the mesh-processing routines
# wrapped by the module-level functions in pygel3d.hmesh.
lib_py_gel.is_halfedge_at_boundary.restype = ct.c_bool
lib_py_gel.is_halfedge_at_boundary.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.is_vertex_at_boundary.restype = ct.c_bool
lib_py_gel.is_vertex_at_boundary.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.length.restype = ct.c_double
lib_py_gel.length.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.boundary_edge.restype = ct.c_bool
lib_py_gel.boundary_edge.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_size_t)
lib_py_gel.valency.restype = ct.c_size_t
lib_py_gel.valency.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.vertex_normal.argtypes = (ct.c_void_p, ct.c_size_t, ct.POINTER(ct.c_double*3))
lib_py_gel.connected.restype = ct.c_bool
lib_py_gel.connected.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_size_t)
lib_py_gel.no_edges.restype = ct.c_size_t
lib_py_gel.no_edges.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.face_normal.argtypes = (ct.c_void_p, ct.c_size_t, ct.POINTER(ct.c_double*3))
lib_py_gel.area.restype = ct.c_double
lib_py_gel.area.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.perimeter.restype = ct.c_double
lib_py_gel.perimeter.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.centre.argtypes = (ct.c_void_p, ct.c_size_t, ct.POINTER(ct.c_double*3))
lib_py_gel.valid.restype = ct.c_bool
lib_py_gel.valid.argtypes = (ct.c_void_p,)
lib_py_gel.closed.restype = ct.c_bool
lib_py_gel.closed.argtypes = (ct.c_void_p,)
lib_py_gel.bbox.argtypes = (ct.c_void_p, ct.POINTER(ct.c_double*3),ct.POINTER(ct.c_double*3))
lib_py_gel.bsphere.argtypes = (ct.c_void_p, ct.POINTER(ct.c_double*3), ct.POINTER(ct.c_double))
lib_py_gel.stitch_mesh.argtypes = (ct.c_void_p,ct.c_double)
lib_py_gel.stitch_mesh.restype = ct.c_int
# File I/O: loaders/savers take a C string path and a Manifold handle.
lib_py_gel.obj_save.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.off_save.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.x3d_save.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.obj_load.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.off_load.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.ply_load.argtypes = (ct.c_char_p, ct.c_void_p)
lib_py_gel.x3d_load.argtypes = (ct.c_char_p, ct.c_void_p)
# Mesh-processing routines (cleanup, optimization, subdivision, smoothing).
lib_py_gel.remove_caps.argtypes = (ct.c_void_p, ct.c_float)
lib_py_gel.remove_needles.argtypes = (ct.c_void_p, ct.c_float, ct.c_bool)
lib_py_gel.close_holes.argtypes = (ct.c_void_p,ct.c_int)
lib_py_gel.flip_orientation.argtypes = (ct.c_void_p,)
lib_py_gel.merge_coincident_boundary_vertices.argtypes = (ct.c_void_p, ct.c_double)
lib_py_gel.minimize_curvature.argtypes = (ct.c_void_p,ct.c_bool)
lib_py_gel.minimize_dihedral_angle.argtypes = (ct.c_void_p, ct.c_int, ct.c_bool, ct.c_bool, ct.c_double)
lib_py_gel.maximize_min_angle.argtypes = (ct.c_void_p,ct.c_float,ct.c_bool)
lib_py_gel.optimize_valency.argtypes = (ct.c_void_p,ct.c_bool)
lib_py_gel.randomize_mesh.argtypes = (ct.c_void_p,ct.c_int)
lib_py_gel.quadric_simplify.argtypes = (ct.c_void_p,ct.c_double,ct.c_double,ct.c_bool)
lib_py_gel.average_edge_length.argtypes = (ct.c_void_p,)
lib_py_gel.average_edge_length.restype = ct.c_float
lib_py_gel.median_edge_length.argtypes = (ct.c_void_p,)
lib_py_gel.median_edge_length.restype = ct.c_float
lib_py_gel.refine_edges.argtypes = (ct.c_void_p,ct.c_float)
lib_py_gel.refine_edges.restype = ct.c_int
lib_py_gel.cc_split.argtypes = (ct.c_void_p,)
lib_py_gel.loop_split.argtypes = (ct.c_void_p,)
lib_py_gel.root3_subdivide.argtypes = (ct.c_void_p,)
lib_py_gel.rootCC_subdivide.argtypes = (ct.c_void_p,)
lib_py_gel.butterfly_subdivide.argtypes = (ct.c_void_p,)
lib_py_gel.cc_smooth.argtypes = (ct.c_void_p,)
lib_py_gel.loop_smooth.argtypes = (ct.c_void_p,)
lib_py_gel.ear_clip_triangulate.argtypes = (ct.c_void_p,)
lib_py_gel.shortest_edge_triangulate.argtypes = (ct.c_void_p,)
lib_py_gel.graph_to_feq.argtypes = (ct.c_void_p, ct.c_void_p, ct.POINTER(ct.c_double))
lib_py_gel.graph_to_feq.restype = ct.c_void_p
lib_py_gel.taubin_smooth.argtypes = (ct.c_void_p, ct.c_int)
lib_py_gel.laplacian_smooth.argtypes = (ct.c_void_p, ct.c_float, ct.c_int)
lib_py_gel.volumetric_isocontouring.argtypes = (ct.c_void_p, ct.c_int, ct.c_int, ct.c_int, ct.POINTER(ct.c_float), ct.POINTER(ct.c_double), ct.POINTER(ct.c_double), ct.c_float, ct.c_bool, ct.c_bool )
# MeshDistance allows us to compute the signed distance to a mesh
# (wrapped by the MeshDistance class in pygel3d.hmesh).
lib_py_gel.MeshDistance_new.restype = ct.c_void_p
lib_py_gel.MeshDistance_new.argtypes = (ct.c_void_p,)
lib_py_gel.MeshDistance_signed_distance.argtypes = (ct.c_void_p,ct.c_int, ct.POINTER(ct.c_float),ct.POINTER(ct.c_float),ct.c_float)
lib_py_gel.MeshDistance_ray_inside_test.argtypes = (ct.c_void_p,ct.c_int, ct.POINTER(ct.c_float),ct.POINTER(ct.c_int),ct.c_int)
lib_py_gel.MeshDistance_delete.argtypes = (ct.c_void_p,)
# The Graph class (wrapped by the Graph class in pygel3d.graph).
lib_py_gel.Graph_new.restype = ct.c_void_p
lib_py_gel.Graph_copy.restype = ct.c_void_p
lib_py_gel.Graph_copy.argtypes = (ct.c_void_p,)
lib_py_gel.Graph_delete.argtypes = (ct.c_void_p,)
lib_py_gel.Graph_clear.argtypes = (ct.c_void_p,)
lib_py_gel.Graph_cleanup.argtypes = (ct.c_void_p,)
lib_py_gel.Graph_nodes.argtypes = (ct.c_void_p, ct.c_void_p)
lib_py_gel.Graph_nodes.restype = ct.c_size_t
lib_py_gel.Graph_neighbors.restype = ct.c_size_t
lib_py_gel.Graph_neighbors.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_void_p, ct.c_char)
lib_py_gel.Graph_positions.argtypes = (ct.c_void_p,ct.POINTER(ct.POINTER(ct.c_double)))
lib_py_gel.Graph_positions.restype = ct.c_size_t
lib_py_gel.Graph_average_edge_length.argtypes = (ct.c_void_p,)
lib_py_gel.Graph_add_node.argtypes = (ct.c_void_p, np.ctypeslib.ndpointer(ct.c_double))
lib_py_gel.Graph_add_node.restype = ct.c_size_t
lib_py_gel.Graph_remove_node.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Graph_node_in_use.argtypes = (ct.c_void_p, ct.c_size_t)
lib_py_gel.Graph_connect_nodes.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_size_t)
lib_py_gel.Graph_connect_nodes.restype = ct.c_size_t
lib_py_gel.Graph_disconnect_nodes.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_size_t)
lib_py_gel.Graph_merge_nodes.argtypes = (ct.c_void_p, ct.c_size_t, ct.c_size_t, ct.c_bool)
# Graph functions: conversion, I/O, smoothing and skeletonization
# (wrapped by module-level functions in pygel3d.graph).
lib_py_gel.graph_from_mesh.argtypes = (ct.c_void_p, ct.c_void_p)
lib_py_gel.graph_load.argtypes = (ct.c_void_p, ct.c_char_p)
lib_py_gel.graph_load.restype = ct.c_void_p
lib_py_gel.graph_save.argtypes = (ct.c_void_p, ct.c_char_p)
lib_py_gel.graph_save.restype = ct.c_bool
lib_py_gel.graph_to_mesh_cyl.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_float)
lib_py_gel.graph_to_mesh_cyl.restype = ct.c_void_p
lib_py_gel.graph_smooth.argtypes = (ct.c_void_p, ct.c_int, ct.c_float)
lib_py_gel.graph_edge_contract.argtypes = (ct.c_void_p, ct.c_double)
lib_py_gel.graph_prune.argtypes = (ct.c_void_p,)
lib_py_gel.graph_LS_skeleton.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_bool)
lib_py_gel.graph_front_skeleton.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_int, ct.POINTER(ct.c_double))
class IntVector:
    """ Vector of integer values.
    Thin Python wrapper around an integer vector allocated by libPyGEL.
    Supports len(), index-based retrieval and iteration. Since the memory
    is owned by the PyGEL library, library calls may resize the vector.
    Generally not used directly by PyGEL3D users."""
    def __init__(self):
        # .obj is the raw C handle; other wrappers pass it to libPyGEL calls.
        self.obj = lib_py_gel.IntVector_new(0)
    def __del__(self):
        lib_py_gel.IntVector_delete(self.obj)
    def __len__(self):
        return int(lib_py_gel.IntVector_size(self.obj))
    def __getitem__(self, idx):
        return lib_py_gel.IntVector_get(self.obj, idx)
    def __iter__(self):
        for i in range(len(self)):
            yield lib_py_gel.IntVector_get(self.obj, i)
class Vec3dVector:
    """ Vector of 3D vectors.
    Thin Python wrapper around a vector of 3D points allocated by libPyGEL.
    Supports len(), index-based retrieval and iteration; iteration yields
    [x, y, z] lists. Since the memory is owned by the PyGEL library,
    library calls may resize the vector. Generally not used directly by
    PyGEL3D users."""
    def __init__(self):
        # .obj is the raw C handle; other wrappers pass it to libPyGEL calls.
        self.obj = lib_py_gel.Vec3dVector_new(0)
    def __del__(self):
        lib_py_gel.Vec3dVector_delete(self.obj)
    def __len__(self):
        return int(lib_py_gel.Vec3dVector_size(self.obj))
    def __getitem__(self, idx):
        return lib_py_gel.Vec3dVector_get(self.obj, idx)
    def __iter__(self):
        for i in range(len(self)):
            p = lib_py_gel.Vec3dVector_get(self.obj, i)
            yield [p[0], p[1], p[2]]
| 16,560 | 53.837748 | 199 | py |
GEL | GEL-master/src/PyGEL/pygel3d/spatial.py | """ This module provides a kD-tree implementation but specialized to 3D """
from pygel3d import lib_py_gel, Vec3dVector, IntVector
import ctypes as ct
class I3DTree:
    """ kD tree specialized for 3D keys and integer values.
    Useful for storing 3D points with associated integer values, typically
    indices. A more general kd tree is available in scipy.spatial if this
    one does not suit your needs. Usage: insert() all points, call build(),
    then query with closest_point() or in_sphere(). """
    def __init__(self):
        self.obj = lib_py_gel.I3DTree_new()
    def __del__(self):
        lib_py_gel.I3DTree_delete(self.obj)
    def insert(self, p, v):
        """ Insert value v at the 3D point p. All calls to insert must come
        before the call to build. """
        lib_py_gel.I3DTree_insert(self.obj, p[0], p[1], p[2], v)
    def build(self):
        """ Build the tree, making it searchable. Call once, after all
        insertions. """
        lib_py_gel.I3DTree_build(self.obj)
    def closest_point(self, p, r):
        """ Return the ([x,y,z], value) pair closest to p within max radius
        r, or None if no point is found. Only call after build. """
        key = (ct.c_double * 3)()
        val = ct.c_size_t()
        found = lib_py_gel.I3DTree_closest_point(self.obj, p[0], p[1], p[2], r,
                                                 ct.byref(key), ct.byref(val))
        if found == 1:
            return ([key[0], key[1], key[2]], val.value)
        return None
    def in_sphere(self, p, r):
        """ Return (keys, vals) for all points within radius r of p, as a
        Vec3dVector and IntVector. Only call after build. """
        keys = Vec3dVector()
        vals = IntVector()
        # The C call fills both vectors in place; its count return is unused.
        lib_py_gel.I3DTree_in_sphere(self.obj, p[0], p[1], p[2], r, keys.obj, vals.obj)
        return (keys, vals)
| 1,728 | 42.225 | 100 | py |
GEL | GEL-master/src/PyGEL/pygel3d/gl_display.py | """ This module provides an OpenGL based viewer for graphs and meshes """
from pygel3d import hmesh, graph, lib_py_gel
import ctypes as ct
import numpy as np
from os import getcwd, chdir
# NOTE(review): the try/except AttributeError around this whole section
# presumably lets pygel3d import even when libPyGEL was built without the
# GL viewer entry points (headless builds) -- confirm against build options.
try:
    # ctypes signatures for the viewer entry points
    lib_py_gel.GLManifoldViewer_new.restype = ct.c_void_p
    lib_py_gel.GLManifoldViewer_delete.argtypes = (ct.c_void_p,)
    lib_py_gel.GLManifoldViewer_display.argtypes = (ct.c_void_p,ct.c_void_p,ct.c_void_p,ct.c_char,ct.c_bool, ct.POINTER(ct.c_float*3), ct.POINTER(ct.c_double),ct.c_bool,ct.c_bool)
    lib_py_gel.GLManifoldViewer_get_annotation_points.restype = ct.c_size_t
    lib_py_gel.GLManifoldViewer_get_annotation_points.argtypes = (ct.c_void_p, ct.POINTER(ct.POINTER(ct.c_double)))
    lib_py_gel.GLManifoldViewer_set_annotation_points.argtypes = (ct.c_void_p, ct.c_int, ct.POINTER(ct.c_double))
    lib_py_gel.GLManifoldViewer_event_loop.argtypes = (ct.c_bool,)
    class Viewer:
        """ An OpenGL Viewer for Manifolds and Graphs. Having created an instance of this
        class, call display to show a mesh or a graph. The display function is flexible,
        allowing several types of interactive visualization. Each instance of this
        class corresponds to a single window, but you can have several
        GLManifoldViewer and hence also several windows showing different
        visualizations. """
        def __init__(self):
            current_directory = getcwd()
            self.obj = lib_py_gel.GLManifoldViewer_new()
            chdir(current_directory) # Necessary because init_glfw changes cwd
        def __del__(self):
            lib_py_gel.GLManifoldViewer_delete(self.obj)
        # NOTE: bg_col's default is a mutable list, but it is only read
        # (np.array copies it below), so the shared-default pitfall does not
        # apply here.
        def display(self, m, g=None, mode='w', smooth=True, bg_col=[0.3,0.3,0.3], data=None, reset_view=False, once=False):
            """ Display a mesh
            Args:
            ---
            - m : the Manifold mesh or Graph we want to show.
            - g : the Graph we want to show. If you only want to show a graph, you
                can simply pass the graph as m, so the g argument is relevant only if
                you need to show both a Manifold _and_ a Graph.
            - mode : a single character that determines how the mesh is visualized:
                'w' - wireframe,
                'i' - isophote,
                'g' - glazed (try it and see),
                's' - scalar field,
                'l' - line field,
                'n' - normal.
                'x' - xray or ghost rendering. Useful to show Manifold on top of Graph
            - smooth : if True we use vertex normals. Otherwise, face normals.
            - bg_col : background color.
            - data : per vertex data for visualization. scalar or vector field.
            - reset_view : if False view is as left in the previous display call. If
                True, the view is reset to the default.
            - once : if True we immediately exit the event loop and return. However,
                the window stays and if the event loop is called from this or any
                other viewer, the window will still be responsive.
            Interactive controls:
            ---
            When a viewer window is displayed on the screen, you can naviagate with
            the mouse: Left mouse button rotates, right mouse button is used for
            zooming and (if shift is pressed) for panning. If you hold control, any
            mouse button will pick a point on the 3D model. Up to 19 of these points
            have unique colors. If you pick an already placed annotation point it
            will be removed and can now be placed elsewhere. Hit space bar to clear
            the annotation points. Hitting ESC exits the event loop causing control
            to return to the script.
            """
            # Marshal the per-vertex data and background color as C arrays.
            # NOTE(review): when data is None, np.array(None) yields a 0-d
            # object array -- presumably the C side treats the pointer as
            # "no data"; confirm.
            data_ct = np.array(data,dtype=ct.c_double).ctypes
            data_a = data_ct.data_as(ct.POINTER(ct.c_double))
            bg_col_ct = np.array(bg_col,dtype=ct.c_float).ctypes
            bg_col_a = bg_col_ct.data_as(ct.POINTER(ct.c_float*3))
            # Allow passing a Graph as the first argument for convenience
            if isinstance(m,graph.Graph):
                g = m
                m = None
            # Dispatch on which of (mesh, graph) were actually supplied;
            # 0 stands in for a null pointer on the C side.
            if isinstance(m,hmesh.Manifold) and isinstance(g,graph.Graph):
                lib_py_gel.GLManifoldViewer_display(self.obj, m.obj, g.obj, ct.c_char(mode.encode('ascii')),smooth,bg_col_a,data_a,reset_view,once)
            elif isinstance(m,hmesh.Manifold):
                lib_py_gel.GLManifoldViewer_display(self.obj, m.obj, 0, ct.c_char(mode.encode('ascii')),smooth,bg_col_a,data_a,reset_view,once)
            elif isinstance(g,graph.Graph):
                lib_py_gel.GLManifoldViewer_display(self.obj, 0, g.obj, ct.c_char(mode.encode('ascii')),smooth,bg_col_a,data_a,reset_view,once)
        def annotation_points(self):
            """ Retrieve a vector of annotation points. This vector is not a copy,
            so any changes made to the points will be reflected in the viewer.
            Returns None when no annotation points have been placed. """
            pos = ct.POINTER(ct.c_double)()
            n = lib_py_gel.GLManifoldViewer_get_annotation_points(self.obj, ct.byref(pos))
            if n == 0:
                return None
            # Wrap the library-owned buffer as an (n, 3) array without copying
            return np.ctypeslib.as_array(pos,(n,3))
        def set_annotation_points(self, pts):
            """ Set the annotation points shown in the viewer. pts is an
            array-like whose total size is a multiple of 3, interpreted as
            n 3D points. """
            n = int(np.size(pts)/3)
            pts_ct = np.array(pts,dtype=ct.c_double).ctypes
            pts_a = pts_ct.data_as(ct.POINTER(ct.c_double))
            lib_py_gel.GLManifoldViewer_set_annotation_points(self.obj, n, pts_a)
        @staticmethod
        def event_loop():
            """ Explicit call to the event loop. This function enters the event loop.
            Call it if you want to turn on interactivity in the currently displayed
            window."""
            lib_py_gel.GLManifoldViewer_event_loop(False)
except AttributeError:
    pass
| 5,764 | 56.65 | 179 | py |
GEL | GEL-master/src/demo/FEQ-Remeshing/feq-remeshing-example.py | #!/opt/local/bin/python
from pygel3d import hmesh, graph, gl_display as gl
from os import getcwd
# Demo: build FEQ meshes from skeleton graphs, fit each one to its
# corresponding reference mesh, and display the result interactively.
graphs = [
    'hand.graph',
    'armadillo_symmetric.graph',
    'bunny.graph',
    'feline.graph',
    'fertility.graph',
    'warrior.graph']
objs = [
    'usai_hand_tri.obj',
    'armadillo.obj',
    'bunny.obj',
    'feline.obj',
    'fertility_tri.obj',
    'warrior.obj'
    ]
# Per-model iteration counts for the local fitting step
iters = [150, 75, 50, 50, 50, 50]

mesh_dir = '../../../data/ReferenceMeshes/'
skel_dir = '../../../data/Graphs/'

viewer = gl.Viewer()
# Note: renamed the loop variable from 'iter' -- it shadowed the builtin.
for g_file, o_file, n_iter in zip(graphs, objs, iters):
    print("Remeshing " + o_file)
    print('Building FEQ')
    s = graph.load(skel_dir + g_file)
    m_skel = hmesh.skeleton_to_feq(s)
    # Refine and smooth the coarse quad mesh before fitting
    hmesh.cc_split(m_skel)
    hmesh.cc_smooth(m_skel)
    print('Fitting to reference mesh')
    ref_mesh = hmesh.load(mesh_dir + o_file)
    fit_mesh = hmesh.Manifold(m_skel)
    fit_mesh = hmesh.fit_mesh_to_ref(fit_mesh, ref_mesh, local_iter=n_iter)
    print("Displaying. HIT ESC IN GRAPHICS WINDOW TO PROCEED...")
    viewer.display(fit_mesh, reset_view=True)
| 1,097 | 21.875 | 73 | py |
cudaImageWarp | cudaImageWarp-master/python/setup.py | from setuptools import setup
# Distribution metadata for the pyCudaImageWarp package.
# NOTE(review): no packages=, py_modules= or ext_modules= are declared, so as
# written the install would ship metadata only -- confirm whether the package
# contents are added by a separate build step.
setup(
    name='pyCudaImageWarp',
    version='0.1',
    description='Warp images with CUDA',
    author='Blaine Rister',
    author_email='blaine@stanford.edu',
    license='MIT'
)
| 237 | 20.636364 | 44 | py |
cudaImageWarp | cudaImageWarp-master/python/pyCudaImageWarp/cudaImageWarp.py | """
Warp an image using CUDA. Python wrapper for the C library.
(c) Blaine Rister 2018
"""
import numpy as np
import ctypes
import pyCudaImageWarp
"""
Verify that the inputs are correct. Returns default parameter values.
"""
def __check_inputs(im, A, shape, device):
# Make sure device is positive
if device is None:
# -1 signals no preference in device
device = -1
elif device < 0:
raise ValueError(
"received device %d, must be non-negative!" % device)
# Default to the same shape as im
if shape is None:
shape = im.shape
# Check the dimensions
ndim = 3;
Ashape = (ndim, ndim + 1)
if len(im.shape) != ndim:
raise ValueError("im has shape %s, expected %d dimensions" % \
(im.shape, ndim))
if len(shape) != ndim:
raise ValueError("received output shape %s, expected %d "
"dimensions" % (shape, ndim))
if not np.equal(A.shape, Ashape).all():
raise ValueError("Expected A shape %s, received %s" % \
(Ashape, A.shape))
return shape, device
"""
Convert the inputs into the required formats for the C library.
"""
def __convert_inputs(im, A, interp):
# Convert the interpolation string to and integer code
interpMap = {
'nearest' : 0,
'linear' : 1
}
interpCode = interpMap[interp]
# Convert the inputs to C float arrays
im = np.require(im, dtype='float32', requirements=['F', 'A'])
A = np.require(A, dtype='float32', requirements=['C', 'A'])
return im, A, interpCode
"""
Create a C float array for the output
"""
def __create_output(shape):
out = np.zeros(shape, dtype='float32')
out = np.require(out, dtype='float32',
requirements=['F', 'A', 'W', 'O'])
return out
"""
Shortcut to take care of inputs.
"""
def __handle_inputs(im, A, shape, interp, device):
shape, device = __check_inputs(im, A, shape, device)
im, A, interpCode = __convert_inputs(im, A, interp)
return im, A, shape, interpCode, device
"""
Warp a since image. Returns the result in the same datatype as the input.
Arguments:
im -- An image volume, i.e. a 3D numpy array. Indexed in Fortran order,
e.g. im[x, y, z].
A -- A [4x3] matrix defining the transformation. A[0, :] applies to the
x-coordinates, A[1, :] the y-coordinates, A[2, :] the
z-coordinates. See im for more details.
interp -- The interpolation type. Supported values are either
'linear' (default) or 'nearest'.
shape -- The shape of the output. By default, this is the same as the
input. This can be used to crop or pad an image.
std -- The standard derviation of white Gaussian noise added to the
output.
winMax -- The maximum intensity value to be used in the window.
winMin -- The minimum intensity value to be used in the window.
occZmin -- The minimum z-value to be occluded.
occZmax -- The maximum z-value to be occluded.
oob - The value assigned to out-of-bounds voxels.
rounding - The rounding mode applied to the output. The options are:
'nearest' - Python default float to input type conversion
'positive' -
device -- The ID of the CUDA device to be used. (Optional)
"""
def warp(im, A, interp='linear', shape=None, std=0.0,
winMin=-float('inf'), winMax=float('inf'), occZmin=0, occZmax=-1, oob=0,
rounding='default', device=None):
# Handle inputs
im, A, shape, interpCode, device = __handle_inputs(im, A, shape,
interp, device)
# Create the output
out = __create_output(shape)
# Warp
ret = pyCudaImageWarp.warpfun(
im.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.c_int(im.shape[0]),
ctypes.c_int(im.shape[1]),
ctypes.c_int(im.shape[2]),
out.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.c_int(shape[0]),
ctypes.c_int(shape[1]),
ctypes.c_int(shape[2]),
ctypes.c_int(interpCode),
A.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.c_float(std),
ctypes.c_float(winMin),
ctypes.c_float(winMax),
ctypes.c_int(occZmin),
ctypes.c_int(occZmax),
ctypes.c_float(oob),
ctypes.c_int(device)
)
if ret != 0:
raise ValueError(ret)
# Returns in float32 format
return out
"""
Push an image onto the queue. See warp() for parameters.
"""
def push(im, A, interp='linear', shape=None, std=0.0,
winMin=-float('inf'), winMax=float('inf'), occZmin=0, occZmax=-1,
oob=0, rounding='default', device=None):
# Handle inputs
im, A, shape, interpCode, device = __handle_inputs(im, A, shape,
interp, device)
# Enqueue the image warping
ret = pyCudaImageWarp.pushfun(
im.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.c_int(im.shape[0]),
ctypes.c_int(im.shape[1]),
ctypes.c_int(im.shape[2]),
ctypes.c_int(shape[0]),
ctypes.c_int(shape[1]),
ctypes.c_int(shape[2]),
ctypes.c_int(interpCode),
A.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.c_float(std),
ctypes.c_float(winMin),
ctypes.c_float(winMax),
ctypes.c_int(occZmin),
ctypes.c_int(occZmax),
ctypes.c_float(oob),
ctypes.c_int(device)
)
if ret != 0:
raise ValueError(ret)
# Push the inputs onto the queue
pyCudaImageWarp.q.put({
'shape': shape,
'rounding': rounding
})
"""
Finish processing the top image on the queue, returning the result.
"""
def pop():
# Retrieve the inputs
inputs = pyCudaImageWarp.q.get_nowait()
# Create the output array
out = __create_output(inputs['shape'])
# Get the result
ret = pyCudaImageWarp.popfun(
out.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
)
if ret != 0:
raise ValueError(ret)
# Returns in float32 format
return out
| 6,830 | 32.485294 | 80 | py |
cudaImageWarp | cudaImageWarp-master/python/pyCudaImageWarp/augment3d.py | import math
import numpy as np
import scipy.ndimage as nd
from pyCudaImageWarp import cudaImageWarp
"""
Pad the image to have a singleton channel dimension.
"""
def __pad_channel__(im):
ndim = 3
return np.expand_dims(im, ndim) if len(im.shape) < ndim + 1 else im
"""
As __pad_channel__, but for shapes rather than arrays.
"""
def __shape_pad_channel__(shape):
ndim = 3
if len(shape) < ndim + 1:
shape = shape + (1,) * (ndim + 1 - len(shape))
return shape
"""
Adjust the translation component of an affine transform so that it maps
'point' to 'target'. Does not change the linear component.
"""
def set_point_target_affine(mat, point, target):
mat = mat.astype(float)
mat[0:3, 3] = target - mat[0:3, 0:3].dot(point[np.newaxis].T).T
return mat
def jitter_mask(labels, pQuit=0.5, maxIter=1, pKeep=0.5, pJagged=0.5):
    """
    Slightly modify a set of labels, with randomness. Only modifies the
    image mask, that is, the invalid labels. Jitters the perimeter of the
    invalid region by random erosion/dilation and returns a new label
    array; the input is not modified.

    pQuit -- probability of returning the input unchanged.
    maxIter -- maximum number of erosion/dilation iterations.
    pKeep -- in jagged mode, probability of keeping each boundary change.
    pJagged -- probability of using jagged (per-voxel) jittering rather
        than a single smooth morphological operation.

    NOTE(review): the docstring originally said "labels less than zero",
    but the code below treats exactly -1 as invalid -- confirm which is
    intended.
    """
    # With probability pQuit, do nothing at all
    if np.random.uniform() <= pQuit:
        return labels

    # Do nothing if all the labels are valid
    invalid = labels == -1
    if not np.any(invalid):
        return labels

    # Randomly draw the number of iterations
    iters = int(round(np.random.uniform(low=1, high=maxIter)))

    # Erode or dilate smoothly
    if np.random.uniform() > pJagged:
        if np.random.uniform() > 0.5:
            invalid = nd.morphology.binary_erosion(invalid, iterations=iters)
        else:
            invalid = nd.morphology.binary_dilation(invalid, iterations=iters)
    else:
        # Jitter the boundary in each iteration
        for i in range(iters):
            # Choose whether to erode or dilate
            if np.random.uniform() > 0.5:
                new = nd.morphology.binary_erosion(invalid)
            else:
                new = nd.morphology.binary_dilation(invalid)

            # Get the difference and randomly choose whether to keep them
            diff = new ^ invalid
            invalid[diff] = np.random.uniform(size=(np.sum(diff),)) <= pKeep

    # Return the result: -1 in the jittered invalid region, original
    # labels elsewhere
    result = np.zeros_like(labels)
    result[invalid] = -1
    result[~invalid] = labels[~invalid]

    return result
def get_translation_affine(offset):
    """
    Build a 4x4 affine matrix (homogeneous coordinates) that shifts by the
    given offset.
    """
    mat = np.identity(4)
    mat[:3, 3] = offset
    return mat
"""
Check that the image shape is compatible with the xform shape, up to ndim.
Ignores channels unless they're >1.
"""
def __check_shapes__(imShape, xformShape, ndim=3):
hasChannels = len(imShape) > ndim and imShape[ndim] > 1
if hasChannels and xformShape[ndim] != imShape[ndim]:
raise ValueError("Output shape has %d channels, while input has %d" % \
(xformShape[3], imShape[3]))
if len(xformShape[:ndim]) != len(imShape[:ndim]):
raise ValueError("""
Input and output shapes have mismatched number of dimensions.
Input: %s, Output: %s"
""" % (xformShape, imShape))
def __shape_center__(shape):
    """ Return the (x, y, z) center coordinate of a volume of this shape. """
    return 0.5 * (np.asarray(shape[:3]) - 1.0)
def __crop_uniform__(im_center, crop_half_range):
    """
    Draw a crop center uniformly from the valid crop range around the
    image center. For compatibility with the mask-based crop helpers, the
    chosen point is returned twice: once as the crop center and once as
    the object center, which here is the same point.
    """
    offset = np.random.uniform(low=-crop_half_range, high=crop_half_range)
    center = im_center + offset
    return center, center
def __crop_in_mask__(crop_half_range, mask, printFun=None):
    """
    Pick a crop centered inside the given boolean mask, if at all
    possible. Returns (crop_center, object_center) as computed by
    __sparse_crop_in_mask__ on the mask's nonzero indices.
    """
    flat_inds = np.flatnonzero(mask)
    return __sparse_crop_in_mask__(crop_half_range, mask.shape, flat_inds,
                                   printFun)
def __sparse_crop_in_mask__(crop_half_range, in_shape, inds, printFun=None):
    """
    Pick a crop around a randomly chosen flat index from inds (indices
    into a volume of shape in_shape). Falls back to a uniform crop when
    inds is empty. Returns (crop_center, object_center).
    """
    # Compute shape parameters
    im_center = __shape_center__(in_shape)

    # Check if the mask is empty
    if len(inds) == 0:
        if printFun is not None:
            printFun("Defaulting to uniform crop...")
        return __crop_uniform__(im_center, crop_half_range)

    # Pick a random center in the range
    center_idx = np.random.choice(inds)
    object_center = np.array(np.unravel_index(center_idx, in_shape))

    # Pick the valid crop with a center closest to this one (clamps coordinates)
    object_disp = object_center - im_center
    crop_disp = np.minimum(
        crop_half_range,
        np.maximum(object_disp, -crop_half_range)
    )
    crop_center = im_center + crop_disp

    return crop_center, object_center
"""
Randomly generates a 3D affine map based on the given parameters. Then
applies the map to warp the input image and, optionally, the segmentation.
Warping is done on the GPU using pyCudaImageWarp. By default, the output
shape is the same as that of the input image.
By default, the function only generates the identity map. The affine
transform distribution is controlled by the following parameters:
inShape - The shape of the input image.
seg - The input segmentation, same shape as im (optional).
outShape - The output shape (optional).
init - The initial linear transform. Defaults to identity.
rotMax - Uniform rotation about (x,y,z) axes. For example, (10,10,10)
means +-10 degrees in about each axis.
pReflect - Chance of reflecting about (x,y,z) axis. For example,
(.5, 0, 0) means there is a 50% chance of reflecting about the
x-axis.
shearMax - Uniform shearing about each axis. For example, (1.1, 1.1,
1.1) shears in each axis in the range (1.1, 1 / 1.1)
transMax - Uniform translation in each coordinate. For example, (10, 10,
10) translates by at most +-10 voxels in each coordinate.
otherScale - Gaussian-distributed affine transform. This controls the
variance of each parameter.
randomCrop - Choose whether to randomly crop the image. Possible modes:
'none' - Do no cropping (default).
'uniform' - All crops are equally likely.
'valid' - Like uniform, but only for crops with non-negative label.
'nonzero' - Choose only from crops whose centers have a positive
label. Cannot be used if seg is None.
noiseLevel - An array of C elements. Decide the amount of noise for each channel
using this standard deviation.
windowMin - A 2xC matrix, where C is the number of channels in the image,
from which the lower window threshold is sampled uniformly. By
default, this does nothing. The cth row defines the limits for the
cth channel.
windowMax - A matrix from which the upper window threshold is
sampled uniformly. Same format as winMin. By default, this does
nothing.
occludeProb - Probability that we randomly take out a chunk of out of
the image.
oob_label - The label assigned to out-of-bounds pixels (default: 0)
printFun - If provided, use this function to print the parameters.
oob_image_val - If provided, set out-of-bounds voxels to this value.
api - The underlying computation platform. Either 'cuda' or 'scipy'.
device - The index of the CUDA device, if provided.
All transforms fix the center of the image, except for translation.
"""
def get_xform(inShape, seg=None, outShape=None, randSeed=None,
rotMax=(0, 0, 0), pReflect=(0, 0, 0), init=np.eye(3),
shearMax=(1,1,1), transMax=(0,0,0), otherScale=0, randomCrop='none',
noiseLevel=None, windowMin=None, windowMax=None,
occludeProb=0.0, printFun=None):
# Default to the same output as input shape
if outShape is None:
outShape = inShape
# Pad the shapes with missing dimensions
inShape = __shape_pad_channel__(inShape)
outShape = __shape_pad_channel__(outShape)
numChannels = outShape[-1]
# Check that the input and output shapes are compatible
__check_shapes__(inShape, outShape)
# Set the random seed, if specified
if randSeed is not None:
np.random.seed(randSeed)
# ---Randomly generate the desired transforms, in homogeneous coordinates---
# Draw the noise level
if noiseLevel is not None:
noiseScale = [np.abs(np.random.normal(scale=n)) \
if n > 0 else 0 for n in noiseLevel]
else:
noiseScale = np.zeros(inShape[-1])
# Draw the width of occlusion, if any
if np.random.uniform() < occludeProb:
occludeWidth = int(np.floor(np.random.uniform(low=0,
high=inShape[2] / 2)))
else:
occludeWidth = None
mat_init = np.identity(4)
mat_init[0:3, 0:3] = init
# Get the center of the input volume
im_center = __shape_center__(inShape)
out_center = __shape_center__(outShape)
# Compute the input crop center, and an optional augmentation fixed point
# if we're cropping around some object
if printFun is not None:
printFun("cropType: %s" % randomCrop)
crop_half_range = np.maximum(im_center - out_center, 0)
if randomCrop == 'none':
crop_center = im_center
object_center = crop_center
elif randomCrop == 'uniform':
crop_center, object_center = __crop_uniform__(im_center, crop_half_range)
elif randomCrop == 'valid':
if seg is None:
raise ValueError('Cannot use randomCrop == \'valid\' when seg is not provided!')
# Take the intersection of the crop range and valid classes
crop_center, object_center = __crop_in_mask__(crop_half_range, seg >= 0)
elif randomCrop == 'nonzero':
if seg is None:
raise ValueError('Cannot use randomCrop == \'nonzero\' when seg is not provided!')
crop_center, object_center = __crop_in_mask__(crop_half_range, seg > 0)
else:
raise ValueError('Unrecognized randomCrop: ' + randomCrop)
# Uniform rotation
rotate_deg = np.random.uniform(low=-np.array(rotMax), high=rotMax)
lin_rotate = np.identity(3)
for i in range(3): # Rotate about each axis and combine
# Compute the angle of rotation, in radians
rad = rotate_deg[i] * 2 * math.pi / 360
# Form the rotation matrix about this axis
rot = np.identity(3)
axes = [x for x in range(3) if x != i]
rot[axes[0], axes[0]] = math.cos(rad)
rot[axes[0], axes[1]] = -math.sin(rad)
rot[axes[1], axes[0]] = -rot[axes[0], axes[1]]
rot[axes[1], axes[1]] = rot[axes[0], axes[0]]
# Compose all the rotations
lin_rotate = lin_rotate.dot(rot)
# Extend the linear rotation to an affine transform
mat_rotate = np.identity(4)
mat_rotate[0:3, 0:3] = lin_rotate
# Uniform shear, same chance of shrinking and growing
shearMax = np.array(shearMax)
if np.any(shearMax <= 0):
raise ValueError("Invalid shearMax: %s" % shearMax)
shearScale = np.abs(shearMax - 1.0)
shear = np.array([np.random.normal(loc=1.0,
scale=float(s) / 4) if s > 0 else 1.0 for s in shearScale])
invert_shear = np.random.uniform(size=3) < 0.5
shear[invert_shear] = [1.0 / s if s != 0 else 0 for s in shear[invert_shear]]
mat_shear = np.diag(np.hstack((shear, 1)))
# Reflection
do_reflect = np.random.uniform(size=3) < pReflect
mat_reflect = np.diag(np.hstack((1 - 2 * do_reflect, 1)))
# Generic affine transform, Gaussian-distributed
mat_other = np.identity(4)
mat_other[0:3, :] = mat_other[0:3, :] + \
(np.random.normal(loc=0.0, scale=otherScale, size=(3,4)) \
if otherScale > 0 else 0)
# Uniform translation
transMax = np.array(transMax)
translation = np.random.uniform(low=-transMax,
high=transMax) if np.any(transMax > 0) else np.zeros_like(transMax)
# Compose all the transforms, fix the center of the crop or the center
# of the selected object, depending on the mode
object_center_output = out_center + object_center - crop_center # out_center in uniform mode
mat_total = set_point_target_affine(
mat_rotate.dot( mat_shear.dot( mat_reflect.dot( mat_other.dot( mat_init)
))),
object_center_output,
object_center + translation
)
# Any columns with infinity are unchanged
winMin = np.array([-float('inf') for x in range(numChannels)])
winMax = np.array([float('inf') for x in range(numChannels)])
validCols = ~np.any(
(windowMin is not None and (np.abs(windowMin) == float('inf'))
| (windowMax is not None and np.abs(windowMax) == float('inf'))),
axis=0
)
# Draw the window thresholds uniformly in the specified range
numChannels = inShape[-1]
if windowMin is not None:
winMin[validCols] = np.random.uniform(
low=windowMin[0, validCols],
high=windowMin[1, validCols]
)
if windowMax is not None:
winMax[validCols] = np.random.uniform(
low=windowMax[0, validCols],
high=windowMax[1, validCols]
)
# Draw the occlusion parameters
if occludeWidth is not None:
# Take a chunk out at random
occZmin = int(np.floor(np.random.uniform(
low=-occludeWidth, high=inShape[2])))
occZmax = occZmin + occludeWidth - 1
else:
# By default, do no occlusion
occZmin = 0
occZmax = -1
# Optionally print the result
if printFun is not None:
printFun("crop_center: [%d, %d, %d]" % (crop_center[0], crop_center[1], crop_center[2]))
printFun("occZmin: %d occZmax: %d" % (occZmin, occZmax))
printFun("winmin: %s winmax: %s" % (winMin, winMax))
printFun("rotation: [%d, %d, %d]" % (rotate_deg[0], rotate_deg[1],
rotate_deg[2]))
printFun("translation: [%d, %d, %d]" % (translation[0], translation[1],
translation[2]))
# Return a dict containing all the transform parameters
return {
'affine': mat_total,
'occZmin': occZmin,
'occZmax': occZmax,
'winMin': winMin,
'winMax': winMax,
'noiseScale': noiseScale,
'shape': outShape
}
"""
Choose the implementation based on the api string.
"""
def __get_pushFun_popFun__(api):
if api == 'cuda':
pushFun = cudaImageWarp.push
popFun = cudaImageWarp.pop
elif api == 'scipy':
from pyCudaImageWarp import scipyImageWarp
pushFun = scipyImageWarp.push
popFun = scipyImageWarp.pop
else:
raise ValueError('Unrecognized api: ' + api)
return pushFun, popFun
def apply_xforms_images(xformList, imList, oob=0,
        api='cuda', device=None):
    """
    Shortcut for applying transforms to images only. See apply_xforms.
    """
    return apply_xforms(xformList, imList=imList, oob_image=oob, api=api,
                        device=device)
def apply_xforms_labels(xformList, labelsList, oob=0, rounding=None,
        api='cuda', device=None):
    """
    Shortcut for applying transforms to labels only. See apply_xforms.
    """
    # Fix: the keyword was previously misspelled 'oob_labels', which does
    # not exist in apply_xforms' signature and raised TypeError on every call.
    return apply_xforms(xformList, labelsList=labelsList, oob_label=oob,
        rounding=rounding, api=api, device=device)
def apply_xforms(xformList, imList=None, labelsList=None, oob_image=0,
        oob_label=0, rounding=None, api='cuda', device=None):
    """
    Apply transforms which were created with get_xform. Applies rounding to
    the labels. Rounding options:
        None - Default float to int conversion
        'ceil' - Rounds up

    Images are warped with linear interpolation, labels with nearest-
    neighbor. Returns a tuple holding the list of warped images and/or the
    list of warped labels, in that order, for whichever inputs were given.
    """
    # Check the operating mode
    haveImages = imList is not None
    haveLabels = labelsList is not None
    if not haveImages and not haveLabels:
        raise ValueError("Received neither images nor labels!")

    # Verify inputs
    if haveImages and len(xformList) != len(imList):
        raise ValueError("Received %d xforms but %d images" % (len(xformList),
            len(imList)))
    if haveLabels and len(xformList) != len(labelsList):
        raise ValueError("Received %d xforms but %d labels" % (len(xformList),
            len(labelsList)))

    # Get the implementation
    pushFun, popFun = __get_pushFun_popFun__(api)

    # All pushes happen before all pops, so pops must consume results in
    # the same order: images first, then labels (the backend queue is FIFO).
    # Push all the images
    if haveImages:
        __push_xforms_images__(pushFun, xformList, imList, oob_image, device)

    # Push all the labels
    if haveLabels:
        __push_xforms_labels__(pushFun, xformList, labelsList, oob_label,
            device)

    # Pop all the images
    returns = []
    if haveImages:
        returns.append(__pop_xforms__(imList, xformList, popFun))

    # Pop all the labels, applying the requested rounding to each
    if haveLabels:
        returns.append(
            [__round_image__(im=im, rounding=rounding) for im in __pop_xforms__(
                labelsList, xformList, popFun)]
        )

    return tuple(returns)
def __round_image__(im=None, rounding=None):
    """
    Round an image according to the rounding mode: None returns the input
    unchanged; 'ceil' rounds up and converts to int. Any other mode raises
    ValueError.
    """
    if rounding is None:
        return im
    if rounding == 'ceil':
        return np.ceil(im).astype(int)
    raise ValueError("Unrecognized rounding mode: %s" % rounding)
def __pop_xforms__(imList, xformList, popFun):
    """
    Pop one warped output per (input, xform) pair, in order. The output
    shape is truncated to the input's dimensionality.
    """
    return [
        __pop_xform(xform['shape'][:len(im.shape)], im.dtype, popFun)
        for im, xform in zip(imList, xformList)
    ]
def __push_xforms_images__(*args):
    """
    Push a list of images. Arguments same as __push_xforms__, except
    supplies pushTypeFun.
    """
    # Dispatch with the image-specific push routine
    __push_xforms__(__push_xform_image__, *args)
def __push_xforms_labels__(*args):
    """
    Push a list of labels. Arguments same as __push_xforms_images__.
    """
    # Dispatch with the label-specific push routine
    __push_xforms__(__push_xform_labels__, *args)
def __push_xforms__(pushTypeFun, pushFun, xformList, imList, oob, device):
    """
    Dispatch each (image, xform) pair to pushTypeFun, in order. Shared
    helper for the image- and label-specific push routines; not called
    directly.
    """
    for volume, xform in zip(imList, xformList):
        pushTypeFun(xform, volume, pushFun, oob, device)
def __push_xform_image__(xform, im, pushFun, oob, device):
    """
    Start warping a (possibly multi-channel) image. Each channel is pushed
    separately with the same affine and augmentation parameters, using
    linear interpolation. Called by apply_xforms.
    """
    # Ensure there is an explicit channel axis, then sanity-check shapes
    im = __pad_channel__(im)
    __check_shapes__(im.shape, xform['shape'])

    # Warp each image channel the same way
    affine = xform['affine'][0:3, :]
    out_shape = xform['shape'][:3]
    for chan in range(xform['shape'][3]):
        pushFun(
            im[:, :, :, chan],
            affine,
            interp='linear',
            shape=out_shape,
            std=xform['noiseScale'][chan],
            winMin=xform['winMin'][chan],
            winMax=xform['winMax'][chan],
            occZmin=xform['occZmin'],
            occZmax=xform['occZmax'],
            oob=oob,
            device=device
        )
def __push_xform_labels__(xform, labels, pushFun, oob, device):
    """
    Start warping a label volume. Like __push_xform_image__, but uses
    nearest-neighbor interpolation and no intensity augmentation, so label
    values are preserved.
    """
    # Only the spatial dimensions need to agree
    __check_shapes__(labels.shape[:3], xform['shape'][:3])

    pushFun(
        labels,
        xform['affine'][0:3, :],
        interp='nearest',
        shape=xform['shape'][:3],
        occZmin=xform['occZmin'],
        occZmax=xform['occZmax'],
        oob=oob,
        device=device
    )
def __pop_xform(shape, dtype, popFun):
    """
    Finish warping one volume and return it. Multi-channel outputs are
    reassembled channel-by-channel into a Fortran-ordered array of the
    requested dtype; single-channel outputs are returned as popped.
    """
    if len(shape) <= 3 or shape[3] <= 1:
        # Single-channel: a single pop suffices
        return popFun()

    out = np.zeros(shape, dtype=dtype, order='F')
    for chan in range(shape[3]):
        out[:, :, :, chan] = popFun()
    return out
| 20,411 | 34.685315 | 97 | py |
cudaImageWarp | cudaImageWarp-master/python/pyCudaImageWarp/__init__.py |
"""
Python module initializer for CUDA image warper.
(c) Blaine Rister 2018
"""
import os
import ctypes
# Import queue -- annoying that the name keeps changing
try:
import queue
except:
import Queue as queue
# Initialize a queue for recording inputs
try:
q = queue.queue()
except:
q = queue.Queue()
# Load the library
libName = 'libcudaImageWarp.so'
scriptDir = os.path.abspath(os.path.dirname(__file__))
prefixes = [scriptDir, '/usr/lib', '/usr/local/lib']
# Try to find the library using each of the available prefixes, in order
dll = None
searched = []
for prefix in prefixes:
searchName = os.path.join(prefix, libName)
if not os.path.exists(searchName):
searched.append(searchName)
continue
dll = ctypes.cdll.LoadLibrary(searchName)
break
if dll is None:
raise OSError('Cannot find library ' + libName + '. Searched the ' +
'following paths: ' + '\n'.join(searched))
# Extract the single-image warping function
warpfun = dll.cuda_image_warp
warpfun.argtypes = [
ctypes.POINTER(ctypes.c_float),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.POINTER(ctypes.c_float),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.POINTER(ctypes.c_float),
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_int,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int
]
warpfun.restype = ctypes.c_int
# Extract the warp push function
pushfun = dll.cuda_image_warp_push
pushfun.argtypes = [
ctypes.POINTER(ctypes.c_float),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.POINTER(ctypes.c_float),
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_int,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int
]
pushfun.restype = ctypes.c_int
# Extract the warping end function
popfun = dll.cuda_image_warp_pop
popfun.argTypes = [
ctypes.POINTER(ctypes.c_float)
]
popfun.restype = ctypes.c_int
| 2,064 | 20.736842 | 72 | py |
cudaImageWarp | cudaImageWarp-master/python/pyCudaImageWarp/scipyImageWarp.py | """
Warp an image using scipy. This is a CPU-only implementation of cudaImageWarp.py.
(c) Blaine Rister 2018-2021
"""
import numpy as np
import scipy.ndimage as nd
import pyCudaImageWarp
import concurrent.futures
import os
from .cudaImageWarp import __check_inputs
# Place to store worker threads
threadPool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
def push(im, A, interp='linear', shape=None, std=0.0,
        winMin=-float('inf'), winMax=float('inf'), occZmin=0, occZmax=-1,
        oob=0.0, device=None):
    """
    Reimplementation of push() in cudaImageWarp.py

    Validates the inputs, submits the warp to the thread pool and enqueues
    the resulting future so pop() can retrieve results in FIFO order.
    'device' is accepted for API compatibility; it is only passed to the
    input check, not used by this CPU path.
    """
    # Check inputs
    shape, device = __check_inputs(im, A, shape, device)
    # Map from interpolation mode to spline order
    interpMap = {
        'nearest': 0,
        'linear': 1
    }
    # Push the threadpool future into a queue
    pyCudaImageWarp.q.put(threadPool.submit(
        __warp_im__,
        im,
        A,
        interpMap[interp],
        shape,
        std,
        winMin,
        winMax,
        occZmin,
        occZmax,
        oob
    ))
def __warp_im__(im=None, A=None, interpCode=None, shape=None, std=None,
        winMin=None, winMax=None, occZmin=None, occZmax=None, oob=None):
    """
    Main function for image processing. Called in the thread pool.

    Applies, in order: affine warping, additive Gaussian noise, intensity
    windowing (clamp to [winMin, winMax], then rescale when the window is
    finite), and occlusion of a z-slab.
    """
    # Convert to float 32
    im = im.astype(np.float32)

    # Affine warping
    im = nd.affine_transform(
        im,
        A,
        output_shape=shape,
        order=interpCode,
        mode='constant',
        cval=oob,
        prefilter=False
    )

    # Gaussian noise. Fix: draw one sample per voxel -- previously a single
    # scalar draw was broadcast, uniformly shifting the whole image instead
    # of adding noise.
    if std > 0.0:
        im += np.random.normal(scale=std, size=im.shape)

    # Window
    im = np.maximum(np.minimum(im, winMax), winMin)
    win_width = winMax - winMin
    if win_width != float('inf') and win_width != -float('inf'):
        im -= winMin
        im /= win_width

    # Occlusion. The +1 offsets mirror the CUDA backend's slab convention
    # -- TODO confirm against cudaImageWarp.
    occLo = max(occZmin + 1, 1)
    occHi = min(occZmax + 1, shape[2])
    if occHi > occLo:
        im[:, :, occLo : occHi] = 0.0

    return im
def pop():
    """
    Reimplementation of pop() in cudaImageWarp.py: dequeue the oldest
    pending warp future and wait (up to 30s) for its result.
    """
    future = pyCudaImageWarp.q.get_nowait()
    return future.result(timeout=30)
def warp(im, A, interp='linear', shape=None, std=0.0,
    winMin=-float('inf'), winMax=float('inf'), occZmin=0, occZmax=-1, oob=0,
    device=None):
    """
    Convenience wrapper: synchronously warp one image by issuing a push()
    immediately followed by a pop().
    """
    push(
        im,
        A,
        interp=interp,
        shape=shape,
        std=std,
        winMin=winMin,
        winMax=winMax,
        occZmin=occZmin,
        occZmax=occZmax,
        oob=oob,
        device=device,
    )
    return pop()
| 2,561 | 23.4 | 83 | py |
cudaImageWarp | cudaImageWarp-master/test/test.py | import nibabel as nib
import numpy as np
import sys

from pyCudaImageWarp import cudaImageWarp

# Usage: python test.py [input.nii.gz] [output.nii.gz]
inPath = sys.argv[1]
outPath = sys.argv[2]

# Affine shifting x by 30 voxels and doubling the z scale
A = np.array([
    [1, 0, 0, 30],
    [0, 1, 0, 0],
    [0, 0, 2, 0],
])

# Slab of z-slices to occlude
zMin = 100
zMax = zMin + 50

# Read the input volume
im = nib.load(inPath)
data = im.get_data()

# Warp with additive noise and occlusion, then save the result
dataWarp = cudaImageWarp.warp(data, A, interp='linear', std=50.0, occZmin=zMin,
                              occZmax=zMax)
imOut = nib.Nifti1Image(dataWarp, im.affine, header=im.header)
nib.save(imOut, outPath)

# Warp again, this time without noise
dataWarp = cudaImageWarp.warp(data, A, interp='linear', occZmin=zMin,
                              occZmax=zMax)

# The push/pop API must reproduce the one-shot warp() results exactly
numIters = 2
for _ in range(numIters):
    cudaImageWarp.push(data, A, interp='linear', occZmin=zMin, occZmax=zMax)
for _ in range(numIters):
    dataWarp2 = cudaImageWarp.pop()
    assert(np.all(np.equal(dataWarp, dataWarp2)))
| 1,051 | 22.377778 | 80 | py |
cudaImageWarp | cudaImageWarp-master/test/test_augment3d.py | """
Test the data augmentation in augment3d.py
Usage: python test_augment3d.py [input.nii.gz]
"""
import sys
import numpy as np
import nibabel as nib
from pyCudaImageWarp import augment3d
def apply_and_write_output(im, xform, name, api='cuda', oob=0):
    """
    Apply a single transform to im's voxel data with the chosen backend and
    save the warped result as a NIfTI file under the given name.
    """
    warped = augment3d.apply_xforms(
        [xform], [im.get_data()], api=api, oob_image=oob)[0][0]
    nib.save(nib.Nifti1Image(warped, im.affine, header=im.header), name)
inPath = sys.argv[1]
# Load the image
im = nib.load(inPath)
in_shape = im.get_data().shape
# Test the augmenter with each transform option in isolation
identity = augment3d.get_xform(in_shape)
apply_and_write_output(im, identity, 'identity.nii.gz')
rotate = augment3d.get_xform(in_shape, rotMax=(90, 90, 90))
apply_and_write_output(im, rotate, 'rotate.nii.gz')
reflect = augment3d.get_xform(in_shape, pReflect=(0, 0, 1))
apply_and_write_output(im, reflect, 'reflect.nii.gz')
shear = augment3d.get_xform(in_shape, shearMax=(2,1,1))
apply_and_write_output(im, shear, 'shear.nii.gz')
translate = augment3d.get_xform(in_shape, transMax=(30,30,30))
apply_and_write_output(im, translate, 'translate.nii.gz')
other = augment3d.get_xform(in_shape, otherScale=0.33)
apply_and_write_output(im, other, 'other.nii.gz')
crop = augment3d.get_xform(in_shape, outShape=(100, 100, 100))
apply_and_write_output(im, crop, 'crop.nii.gz')
noise = augment3d.get_xform(in_shape, noiseLevel=[50])
apply_and_write_output(im, noise, 'noise.nii.gz')
window = augment3d.get_xform(in_shape, windowMin=np.array([[0],[0]]),
        windowMax=np.array([[150],[150]]))
apply_and_write_output(im, window, 'window.nii.gz')
occlude = augment3d.get_xform(in_shape, occludeProb=1.0)
apply_and_write_output(im, occlude, 'occlude.nii.gz', oob=10)
# Test the Scipy backup implementation
# (re-uses the rotation transform generated above)
apply_and_write_output(im, rotate, 'scipy_rotate.nii.gz', api='scipy')
| 1,828 | 30.534483 | 70 | py |
Stimela | Stimela-master/setup.py | #!/usr/bin/env python3
import os
import sys
from setuptools import setup
import glob
# Distribution name used by setuptools
PACKAGE_NAME = "stimela"
# Package version string
__version__ = "1.7.9"
# Directory containing this setup.py; used to locate README and requirements
build_root = os.path.dirname(__file__)
def readme():
    """Get readme content for package long description"""
    readme_path = os.path.join(build_root, 'README.rst')
    with open(readme_path) as readme_file:
        return readme_file.read()
def requirements():
    """Get package requirements"""
    req_path = os.path.join(build_root, 'requirements.txt')
    with open(req_path) as req_file:
        return [line.strip() for line in req_file.readlines()]
# Package metadata and contents; see readme()/requirements() above for the
# long description and dependency list.
setup(
    name=PACKAGE_NAME,
    version=__version__,
    description="Dockerized radio interferometry scripting framework",
    long_description=readme(),
    long_description_content_type="text/x-rst",
    author="Sphesihle Makhathini & RATT",
    author_email="sphemakh@gmail.com",
    url="https://github.com/ratt-ru/Stimela",
    packages=[
        "stimela",
        "stimela/cargo",
        "stimela/utils",
        "stimela/cargo/cab",
        "stimela/cargo/base",
    ],
    package_data={
        "stimela/cargo": [
            "base/*/Dockerfile",
            "base/*.template",
            "cab/*/src/*.py",
            "base/*/xvfb.init.d",
            "cab/*/parameters.json",
            "cab/*/src/tdlconf.profiles",
        ]
    },
    python_requires='>=3.6',
    install_requires=requirements(),
    tests_require=['nose'],
    scripts=["bin/" + i for i in os.listdir("bin")] +
        glob.glob("stimela/cargo/cab/stimela_runscript"),
    classifiers=[],
)
| 1,487 | 29.367347 | 72 | py |
Stimela | Stimela-master/examples/simulation_pipeline.py | # import stimela package
# Example recipe: simulate a MeerKAT observation, calibrate it, and image
# it with several Briggs robust weightings.
import stimela
import os

# Recipe I/O configuration
INPUT = "input"    # This folder must exist
OUTPUT = "output"
MSDIR = "msdir"
PREFIX = "stimela-example"  # Prefix for output images

# Optional directory containing pre-pulled singularity images
try:
    SINGULARTITY_IMAGE_DIR = os.environ["STIMELA_SINGULARTITY_IMAGE_DIR"]
except KeyError:
    SINGULARTITY_IMAGE_DIR = None

# Measurement set created and operated on by every step below
MS = "meerkat_simulation_example.ms"
# Use the NVSS skymodel. This is natively available
LSM = "nvss1deg.lsm.html"

# Start stimela Recipe instance
pipeline = stimela.Recipe(
    "Simulation Example",  # Recipe name
    ms_dir=MSDIR,
    indir=INPUT,
    outdir=OUTPUT,
    singularity_image_dir=SINGULARTITY_IMAGE_DIR,
    log_dir=os.path.join(OUTPUT, "logs"),
)
# pipeline.JOB_TYPE = "docker"

# 1: Make an empty MS
pipeline.add(
    "cab/simms",      # Executor image to start container from
    "simms_example",  # Container name
    {
        "msname": MS,
        "telescope": "meerkat",            # Telescope name
        "direction": "J2000,0deg,-30deg",  # Phase tracking centre
        "synthesis": 2,                    # Synthesis time [hours]
        "dtime": 30,                       # Integration time [seconds]
        "freq0": "750MHz",                 # Start frequency
        "dfreq": "1MHz",                   # Channel width
        "nchan": 16,                       # Number of channels
    },
    label="Creating MS",
    cpus=2.5,
    memory_limit="2gb")

# 2: Summarise the observation
pipeline.add(
    "cab/casa_listobs",
    "obsinfo",
    {
        "vis": MS,
        "listfile": MS + "-obsinfo.txt",
        "overwrite": True,
    },
    label="obsinfo:: Observation information")

# 3: Simulate visibilities into it
pipeline.add(
    "cab/simulator",
    "simulator_example",
    {
        "msname": MS,
        "skymodel": LSM,   # Sky model to simulate into MS
        "addnoise": True,  # Add thermal noise to visibilities
        "column": "DATA",  # Simulated data will be saved in this column
        "Gjones": True,
        "sefd": 831,       # Compute noise from this SEFD
        "tile-size": 64,
        "threads": 4,
    },
    label="Simulating visibilities")

# 4: Calibrate against the same sky model
pipeline.add(
    "cab/calibrator",
    "cal_example",
    {
        "msname": MS,
        "skymodel": LSM,
        "tile-size": 64,
        "threads": 4,
        "output-data": 'CORR_DATA',
    },
    label="Calibrating visibilities")

# 5,6,7: Image with different Briggs robust weightings
briggs_robust = [2, 0, -2]
for i, robust in enumerate(briggs_robust):
    pipeline.add(
        "cab/wsclean",
        "imager_example_robust_{:d}".format(i),
        {
            "msname": MS,
            "weight": f"briggs {robust}",
            "prefix": "{:s}_robust-{:d}".format(PREFIX, robust),
            "npix": 4096,   # Image size in pixels
            "cellsize": 2,  # Size of each square pixel
            "niter": 5000,  # Deconvolution (clean) iterations
            "mgain": 0.85,
            "pol": "I",
            "multiscale": True,
            "multiscale-scales": [0, 2],
        },
        label="Imaging MS, robust={:d}".format(robust),
        cpus=2,
        memory_limit="2gb")

# 8: Clean up the simulated MS
pipeline.add(
    "cab/casa_rmtables",
    "delete_ms",
    {
        "tablenames": MS + ":msfile",
    },
    label="Remove MS")

pipeline.run()
| 4,293 | 35.389831 | 93 | py |
Stimela | Stimela-master/stimela/main.py | # -*- coding: future_fstrings -*-
import os
import argparse
from argparse import ArgumentParser
import textwrap as _textwrap
import signal
import stimela
from stimela import docker, singularity, podman, utils
from stimela.utils import logger
from stimela.cargo import cab
# Aliases of package-level registries/settings exposed by the stimela package
BASE = stimela.BASE
CAB = stimela.CAB
GLOBALS = stimela.GLOBALS
CAB_USERNAME = stimela.CAB_USERNAME
# Space-separated list of recognised log level names (lower case)
loglevels = "info debug error"
class MultilineFormatter(argparse.HelpFormatter):
    """Help formatter that treats '|n ' in text as an explicit paragraph break."""

    def _fill_text(self, text, width, indent):
        # Collapse whitespace runs first, as the base formatter does
        normalized = self._whitespace_matcher.sub(' ', text).strip()
        return ''.join(
            _textwrap.fill(paragraph, width,
                           initial_indent=indent,
                           subsequent_indent=indent) + '\n\n'
            for paragraph in normalized.split('|n '))
def build(argv):
    """
    CLI handler for 'stimela build'. Cab/base image building is deprecated:
    after parsing the options this raises SystemExit with a deprecation
    notice (the base-image branch remains as a no-op skeleton).
    """
    # Protect arguments that look like negative numbers from argparse.
    # Fix: this previously did 'argv = ...', replacing the whole list with
    # a single string instead of patching one element (cf. run()/pull()).
    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit():
            argv[i] = ' ' + arg

    parser = ArgumentParser(description='Build executor (a.k.a cab) images')
    parser.add_argument("-b", "--base", action="store_true",
                        help="Build base images")
    parser.add_argument("-c", "--cab", metavar="CAB,CAB_DIR",
                        help="Executor image (name) name, location of executor image files")
    parser.add_argument("-uo", "--us-only",
                        help="Only build these cabs. Comma separated cab names")
    parser.add_argument("-i", "--ignore-cabs", default="",
                        help="Comma separated cabs (executor images) to ignore.")
    parser.add_argument("-p", "--podman", action="store_true",
                        help="Build images using podman.")
    parser.add_argument("-nc", "--no-cache", action="store_true",
                        help="Do not use cache when building the image")

    args = parser.parse_args(argv)
    # Fix: select the backend from the parsed flag. This previously tested
    # the imported 'podman' module object, which is always truthy.
    jtype = "podman" if args.podman else "docker"
    no_cache = ["--no-cache"] if args.no_cache else []

    if args.cab:
        raise SystemExit("DEPRECATION NOTICE: This feature has been deprecated. Please specify your "
                         "custom cab via the 'cabpath' option of the Recipe.add() function.")

    if args.base:
        # Build base and meqtrees images first
        BASE.remove("base")
        BASE.remove("meqtrees")
        BASE.remove("casa")
        BASE.remove("astropy")
        for image in ["base", "meqtrees", "casa", "astropy"] + BASE:
            dockerfile = "{:s}/{:s}".format(stimela.BASE_PATH, image)
            image = "stimela/{0}:{1}".format(image, stimela.__version__)
            #__call__(jtype).build(image,
            #                      dockerfile, args=no_cache)
        return 0

    raise SystemExit("DEPRECATION NOTICE: The building of cab images has been deprecated")
def info(cabdir, header=False, display=True):
    """
    Load a cab definition from <cabdir>/parameters.json, optionally print
    its documentation, and return the CabDefinition instance.
    Raises RuntimeError when no parameter file exists at that location.
    """
    param_file = "{}/parameters.json".format(cabdir)
    # A cab is identified by the presence of its parameter file
    if not os.path.exists(param_file):
        raise RuntimeError("Cab could not be found at : {}".format(cabdir))
    definition = cab.CabDefinition(parameter_file=param_file)
    if display:
        definition.display(header)
    return definition
def cabs(argv):
    """
    CLI handler for 'stimela cabs': show documentation for a single cab
    (--cab-doc), list every cab with a summary (--list-summary), or print
    the comma-separated cab names (default).
    """
    # Protect arguments that look like negative numbers from argparse.
    # Fix: this previously did 'argv = ...', replacing the whole list with
    # a single string instead of patching one element (cf. run()/pull()).
    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit():
            argv[i] = ' ' + arg

    parser = ArgumentParser(description='List executor (a.k.a cab) images')
    parser.add_argument("-i", "--cab-doc",
                        help="Will display document about the specified cab. For example, "
                             "to get help on the 'cleanmask cab' run 'stimela cabs --cab-doc cleanmask'")
    parser.add_argument("-l", "--list", action="store_true",
                        help="List cab names")
    parser.add_argument("-ls", "--list-summary", action="store_true",
                        help="List cabs with a summary of the cab")
    args = parser.parse_args(argv)

    if args.cab_doc:
        # Show full documentation for one cab
        cabdir = "{:s}/{:s}".format(stimela.CAB_PATH, args.cab_doc)
        info(cabdir)
    elif args.list_summary:
        # Summarise every known cab; unreadable parameter files are skipped
        for val in CAB:
            cabdir = "{:s}/{:s}".format(stimela.CAB_PATH, val)
            try:
                info(cabdir, header=True)
            except IOError:
                pass
    else:
        print(', '.join(CAB))
def run(argv):
    """
    CLI handler for 'stimela run': parse recipe options, stash them (plus
    any -g KEY=VALUE[:TYPE] globals) into the execution namespace, then
    execute the user's recipe script.
    """
    # Protect arguments that look like negative numbers from argparse
    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit():
            argv[i] = ' ' + arg
    parser = ArgumentParser(description='Dockerized Radio Interferometric Scripting Framework.\n'
                            'Sphesihle Makhathini <sphemakh@gmail.com>')
    add = parser.add_argument
    add("-r", "--repository", default="quay.io",
        help="Repository from which to pull docker images. The default repository is quay.io")
    add("-in", "--input",
        help="Input folder")
    add("-out", "--output",
        help="Output folder")
    add("-ms", "--msdir",
        help="MS folder. MSs should be placed here. Also, empty MSs will be placed here")
    add("-pf", "--pull-folder",
        help="Folder to store singularity images.")
    add("script",
        help="Run script")
    add("-g", "--globals", metavar="KEY=VALUE[:TYPE]", action="append", default=[],
        help="Global variables to pass to script. The type is assumed to string unless specified")
    add("-jt", "--job-type", choices=["docker", "singularity", "podman"],
        help="Container technology to use when running jobs")
    add("-ll", "--log-level", default="INFO", choices=loglevels.upper().split() + loglevels.split(),
        help="Log level. set to DEBUG/debug for verbose logging")
    args = parser.parse_args(argv)
    # These _STIMELA_* names become the globals of the executed script
    _globals = dict(_STIMELA_INPUT=args.input, _STIMELA_OUTPUT=args.output,
                    _STIMELA_MSDIR=args.msdir,
                    _STIMELA_JOB_TYPE=args.job_type,
                    _STIMELA_REP=args.repository,
                    _STIMELA_LOG_LEVEL=args.log_level.upper(),
                    _STIMELA_PULLFOLDER=args.pull_folder)
    args.job_type = args.job_type or "docker"
    nargs = len(args.globals)
    global GLOBALS
    if nargs:
        # Parse -g KEY=VALUE[:TYPE] options; TYPE defaults to str
        for arg in args.globals:
            if arg.find("=") > 1:
                key, value = arg.split("=")
                try:
                    value, _type = value.split(":")
                except ValueError:
                    _type = "str"
                # SECURITY NOTE: eval() here (and exec() below) runs
                # arbitrary code -- only use with trusted input/scripts.
                GLOBALS[key] = eval("{:s}('{:s}')".format(_type, value))
    utils.CPUS = 1
    with open(args.script, 'r') as stdr:
        exec(stdr.read(), _globals)
def pull(argv):
    """
    CLI handler for 'stimela pull': pull cab base images using docker,
    podman or singularity. With -im/--image or -cb/--cab-base only the
    requested images are pulled; otherwise the base image of every known
    cab is pulled (de-duplicated).
    """
    # Protect arguments that look like negative numbers from argparse
    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit():
            argv[i] = ' ' + arg
    parser = ArgumentParser(description='Pull docker stimela base images')
    add = parser.add_argument
    add("-r", "--repository", default="quay.io",
        help="Repository from which to pull docker images. The default repository is quay.io")
    add("-im", "--image", nargs="+", metavar="IMAGE[:TAG]",
        help="Pull base image along with its tag (or version). Can be called multiple times")
    add("-f", "--force", action="store_true",
        help="force pull if image already exists")
    add("-s", "--singularity", action="store_true",
        help="Pull base images using singularity."
        "Images will be pulled into the directory specified by the enviroment varaible, STIMELA_PULLFOLDER. $PWD by default")
    add("-d", "--docker", action="store_true",
        help="Pull base images using docker.")
    add("-p", "--podman", action="store_true",
        help="Pull base images using podman.")
    add("-cb", "--cab-base", nargs="+",
        help="Pull base image for specified cab")
    add("-at", "--all-tags", action="store_true",
        help="Pull all tags for this image")
    add("-pf", "--pull-folder",
        help="Images will be placed in this folder. Else, if the environmnental variable 'STIMELA_PULLFOLDER' is set, then images will be placed there. "
        "Else, images will be placed in the current directory")
    args = parser.parse_args(argv)
    # Resolve where singularity images should land:
    # CLI flag > STIMELA_PULLFOLDER env var > current directory
    if args.pull_folder:
        pull_folder = args.pull_folder
    else:
        try:
            pull_folder = os.environ["STIMELA_PULLFOLDER"]
        except KeyError:
            pull_folder = "."
    # NOTE(review): jtype is computed but never used below; the branching
    # further down re-tests args.* directly.
    if args.podman:
        jtype = "podman"
    elif args.singularity:
        jtype = "singularity"
    elif args.docker:
        jtype = "docker"
    else:
        jtype = "docker"
    # Collect (repository, image:tag) pairs for any requested cab bases
    images_ = []
    repository_ = []
    for cab in args.cab_base or []:
        if cab in CAB:
            filename = "/".join([stimela.CAB_PATH, cab, "parameters.json"])
            param = utils.readJson(filename)
            tags = param["tag"]
            if not isinstance(tags, list):
                tags = [tags]
            for tag in tags:
                images_.append(":".join([param["base"], tag]))
                repository_.append(param["hub"] if "hub" in param.keys() else args.repository)
    # Append the explicitly requested images (default repository applies)
    repository_ = repository_ + ([args.repository] * len(args.image) if args.image is not None else [])
    args.image = images_ + (args.image if args.image is not None else [])
    assert len(args.image) == len(repository_)
    if args.image:
        for hub, image in zip(repository_, args.image):
            # Docker Hub references need no hub prefix
            if hub == "docker" or hub == "docker.io":
                hub = ""
            if args.singularity:
                # Derive a filesystem-safe image file name
                simage = image.replace("/", "_")
                simage = simage.replace(":", "_") + singularity.suffix
                image = "/".join([hub, image]) if hub != "" else image
                singularity.pull(
                    image, simage, directory=pull_folder, force=args.force)
            else:
                if args.podman:
                    podman.pull("/".join([hub, image]) if hub != "" else image, force=args.force)
                else:
                    docker.pull("/".join([hub, image]) if hub != "" else image, force=args.force)
    else:  # no cab bases or images specifically being pulled
        # Pull the base image of every known cab (last tag only, unless
        # --all-tags is given), skipping duplicates
        base = []
        repository_ = []
        for cab_ in CAB:
            filename = "/".join([stimela.CAB_PATH, cab_, "parameters.json"])
            param = utils.readJson(filename)
            cabdir = "{:s}/{:s}".format(stimela.CAB_PATH, cab_)
            _cab = info(cabdir, display=False)
            tags = _cab.tag
            if isinstance(tags, list):
                if not args.all_tags:
                    tags = [tags[-1]]
            else:
                tags = [tags]
            for tag in tags:
                base.append(f"{_cab.base}:{tag}")
                repository_.append(param["hub"] if "hub" in param.keys() else args.repository)
        assert len(base) == len(repository_)
        uniq_pulls = []
        for hub, image in zip(repository_, base):
            if hub == "docker" or hub == "docker.io":
                hub = ""
            if args.singularity:
                simage = image.replace("/", "_")
                simage = simage.replace(":", "_") + singularity.suffix
                image = "/".join([hub, image]) if hub != "" else image
                if image not in uniq_pulls:
                    uniq_pulls.append(image)
                    singularity.pull(
                        image, simage, directory=pull_folder, force=args.force)
            else:
                image = "/".join([hub, image]) if hub != "" else image
                if image not in uniq_pulls:
                    uniq_pulls.append(image)
                    if args.podman:
                        podman.pull(image, force=args.force)
                    else:
                        docker.pull(image, force=args.force)
def main(argv):
    """
    Top-level stimela entry point: split argv into a sub-command plus its
    options, print overall help when requested, and dispatch to the
    matching handler (pull/build/run/cabs).
    """
    # Protect arguments that look like negative numbers from argparse
    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit():
            argv[i] = ' ' + arg
    parser = ArgumentParser(description='Stimela: Dockerized Radio Interferometric Scripting Framework. '
                            '|n version {:s} |n install path {:s} |n '
                            'Sphesihle Makhathini <sphemakh@gmail.com>'.format(stimela.__version__,
                                                                               os.path.dirname(__file__)),
                            formatter_class=MultilineFormatter,
                            add_help=False)
    add = parser.add_argument
    add("-h", "--help", action="store_true",
        help="Print help message and exit")
    add("-v", "--version", action='version',
        version='{:s} version {:s}'.format(parser.prog, stimela.__version__))
    add("command", nargs="*", metavar="command [options]",
        help="Stimela command to execute. For example, 'stimela help run'")
    options = []
    commands = dict(pull=pull, build=build, run=run, cabs=cabs)
    # images=images, ps=ps,
    # containers=containers, kill=kill,
    # clean=clean)
    command = "failure"
    # Split argv at the first recognised sub-command: everything after it
    # becomes that command's options
    for cmd in commands:
        if cmd in argv:
            command = cmd
            index = argv.index(cmd)
            options = argv[index+1:]
            argv = argv[:index+1]
    args = parser.parse_args(argv)
    # Command is help and no other commands following
    try:
        main_help = (args.command[0] == "help" and len(args.command) == 1)
    except IndexError:
        main_help = True
    if args.help or main_help:
        parser.print_help()
        print("""
Run a command. These can be:

help    : Prints out a help message about other commands
build   : Build a set of stimela images
pull    : pull a stimela base images
run     : Run a stimela script
cabs    : Manage cab images
""")
        raise SystemExit
    # Separate commands into command and arguments
    cmd, argv = args.command[0], args.command[1:]
    # If we've got past the if statement above, and help
    # is the command then assume that help on a command
    # is requested
    if cmd == "help":
        # Request help on the sub-command
        cmd, argv = argv[0], ["-h"]
    else:
        argv = options
    # Get the function to execute for the command
    try:
        _cmd = commands[cmd]
    except KeyError:
        raise KeyError("Command '{:s}' not recognized "
                       "Run : 'stimela help' for help".format(cmd))
    # Invoke the command
    _cmd(argv)
| 14,491 | 34.004831 | 153 | py |
Stimela | Stimela-master/stimela/exceptions.py | # -*- coding: future_fstrings -*-
class StimelaCabParameterError(Exception):
    """Exception type for cab parameter problems."""
    pass
class StimelaRecipeExecutionError(Exception):
    """Exception type for failures while executing a recipe."""
    pass
class StimelaBaseImageError(Exception):
    """Exception type for base-image problems."""
    pass
class PipelineException(Exception):
    """
    Encapsulates information about state of pipeline when an
    exception occurs: the jobs already completed, the job that failed,
    and the jobs that were still pending.
    """

    def __init__(self, exception, completed, failed, remaining):
        super(PipelineException, self).__init__(
            "Job '%s' failed: %s" % (failed.label, str(exception)))
        self._completed = completed
        self._failed = failed
        self._remaining = remaining

    @property
    def completed(self):
        """Jobs that finished before the failure."""
        return self._completed

    @property
    def failed(self):
        """The job whose execution raised."""
        return self._failed

    @property
    def remaining(self):
        """Jobs that had not yet run when the failure occurred."""
        return self._remaining
| 839 | 20.538462 | 74 | py |
Stimela | Stimela-master/stimela/pathformatter.py | from __future__ import print_function
import re
import copy
#placeholders
class placeholder:
    """
    Tag object marking where one of the base directories ("input",
    "output" or "msfile") must later be substituted into a path string.
    """

    def __init__(self, val):
        if val not in ("input", "msfile", "output"):
            raise ValueError("Only accepts input, output or msfile for placeholder argument")
        self.__val = val

    def __call__(self):
        return self.__val

    def __get__(self):
        # NOTE(review): not a real descriptor __get__ (wrong signature);
        # kept as-is for compatibility with any direct callers.
        return self.__val

    def __repr__(self):
        return "Placeholder(type {})".format(self.__val)
class pathformatter:
    '''
    Wrapper for path variables that need further expansion
    to include base directories

    expect:
        pattern: of the format (({})?[\s\S-[{}]]*)*, {} indicate placeholders where
                 paths are to be inserted. \{ and \} escapes these groups
        *args: of special string types input, output and
               msfile. Will attempt to replace placeholders in order
               of *args specification
    Example:
        ...
        {
        'model': pathformatter("MODEL_DATA:{}/mod2.lsm:{}/mod3.DicoModel", "output", "input")
        }...
        This will create placeholders for output and input on the second and third files
        respectively.
    '''
    def __init__(self, val=None, *args):
        if not isinstance(val, str):
            raise ValueError("argument must be of type string")
        self.__val = val
        # Stored reversed so __call__ can pop() placeholders in the order
        # the caller listed them
        self.__args = list(map(lambda x: placeholder(x), args[::-1]))
    def __call__(self):
        """ returns list with mixed value types str, input, msdir or output """
        args = list(copy.deepcopy(self.__args))
        # Group 'R' matches an unescaped '{}' placeholder; 'T' matches the
        # single character that follows (or any non-placeholder character)
        exp = re.compile(r"((?P<R>{})?(?P<T>(?!{})[\S\s]))")
        expr_list = []
        # Split on escaped braces: even elements are plain segments, odd
        # elements are the '\{' / '\}' delimiters themselves
        esc = re.compile(r"(\\{|\\})").split(self.__val) + [None]
        for v, delim in zip(esc[::2],
                esc[1::2]):
            for m in exp.finditer(v):
                if m.groupdict()["R"] is not None:
                    if len(args) == 0:
                        raise RuntimeError("cannot replace format string - not enough arguments specified")
                    # Emit the placeholder, then start a new text fragment
                    expr_list.append(args.pop())
                    expr_list.append(m.groupdict()["T"])
                else:
                    # Grow the current text fragment character by character
                    if len(expr_list) > 0:
                        expr_list[-1] += m.groupdict()["T"]
                    else:
                        expr_list.append(m.groupdict()["T"])
            if delim:
                # Re-insert the literal brace that followed this segment
                if len(expr_list) > 0:
                    expr_list[-1] += delim[-1]
                else:
                    expr_list.append(delim[-1])
        if len(args) != 0:
            raise RuntimeError("could not replace all arguments")
        return expr_list
if __name__ == "__main__":
p = pathformatter("MODEL_DATA+-{}/bla\{ab\}.DicoModel@{}/poly.reg:{}/abc.lsm",
"output",
"output",
"input")
print(p())
| 2,877 | 34.975 | 107 | py |
Stimela | Stimela-master/stimela/singularity.py | import subprocess
import os
import sys
from stimela import utils
from stimela.cargo import cab
import json
import stimela
import time
import datetime
import tempfile
import hashlib
from shutil import which
# Detect the available container runtime: prefer apptainer, fall back to
# singularity. 'version' stays None when neither binary is on the PATH.
version = None
for item in ["apptainer", "singularity"]:
    BINARY = which(item)
    BINARY_NAME = item
    if BINARY:
        __version_string = subprocess.check_output([BINARY, "--version"]).decode("utf8")
        version = __version_string.strip().split()[-1]
        # Old singularity (< 3.0.0) used .img image files; newer builds
        # (and apptainer) use .sif.
        # NOTE(review): lexicographic string comparison -- e.g.
        # "10.0.0" < "3.0.0" -- confirm acceptable for expected versions.
        if BINARY_NAME == "singularity" and version < "3.0.0":
            suffix = ".img"
        else:
            suffix = ".sif"
        break
class SingularityError(Exception):
    """Exception type for singularity-related failures."""
    pass
def pull(image, name, docker=True, directory=".", force=False):
    """
    Pull a singularity image into `directory` under the file name `name`.

    When `docker` is True the image is interpreted as a docker registry
    reference; otherwise it is used verbatim. Unless `force` is set, an
    existing image file is kept and only a notice is logged. Returns 0.
    """
    source = "docker://{0:s}".format(image) if docker else image
    if not os.path.exists(directory):
        os.mkdir(directory)
    target = os.path.abspath(os.path.join(directory, name))
    if os.path.exists(target) and not force:
        stimela.logger().info(f"Singularity image already exists at '{target}'. To replace it, please re-run with the 'force' option")
        return 0
    utils.xrun(f"cd {directory} && {BINARY}", ["pull",
               "--force" if force else "", "--name",
               name, source])
    return 0
class Container(object):
    def __init__(self, image, name,
                 volumes=None,
                 logger=None,
                 time_out=-1,
                 runscript="/singularity",
                 environs=None,
                 workdir=None,
                 execdir=".",
                 ):
        """
        Python wrapper to singularity tools for managing containers.

        NOTE(review): run() reads self.logfile, which is never set here --
        presumably assigned by the caller before run(); verify.
        """
        self.image = image
        self.volumes = volumes or []
        self.environs = environs or []
        self.logger = logger
        self.status = None
        self.WORKDIR = workdir
        self.RUNSCRIPT = runscript
        self.PID = os.getpid()
        self.uptime = "00:00:00"
        self.time_out = time_out
        self.execdir = execdir
        # Copy of the host environment; add_environ() extends it
        self._env = os.environ.copy()
        # Pre-3.0 singularity gets a short md5-derived name -- presumably to
        # sidestep name restrictions; verify. NOTE(review): if no binary was
        # found above, version is None and this comparison raises TypeError.
        hashname = hashlib.md5(name.encode('utf-8')).hexdigest()[:3]
        self.name = hashname if version < "3.0.0" else name
    def add_volume(self, host, container, perm="rw", noverify=False):
        # Register a host->container bind mount; the host path must exist
        # unless noverify is set
        if os.path.exists(host) or noverify:
            if self.logger:
                self.logger.debug("Mounting volume [{0}] in container [{1}] at [{2}]".format(
                    host, self.name, container))
            host = os.path.abspath(host)
        else:
            raise IOError(
                "Path {0} cannot be mounted on container: File doesn't exist".format(host))
        self.volumes.append(":".join([host, container, perm]))
        return 0
    def add_environ(self, key, value):
        # Record the variable both for the container and, via the
        # SINGULARITYENV_/APPTAINERENV_ prefix, in the host environment
        self.logger.debug("Adding environ varaible [{0}={1}] "\
            "in container {2}".format(key, value, self.name))
        self.environs.append("=".join([key, value]))
        key_ = f"{BINARY_NAME.upper()}ENV_{key}"
        self.logger.debug(f"Setting singularity environmental variable {key_}={value} on host")
        self._env[key_] = value
        return 0
    def run(self, *args, output_wrangler=None):
        """
        Run a singularity container instance
        """
        if self.volumes:
            volumes = " --bind " + " --bind ".join(self.volumes)
        else:
            volumes = ""
        # Fail early with guidance when the image was never pulled
        if not os.path.exists(self.image):
            self.logger.error(f"The image, {self.image}, required to run this cab does not exist."\
                " Please run 'stimela pull --help' for help on how to download the image")
            raise SystemExit from None
        self.status = "running"
        # apptainer has no --userns; use a writable tmpfs overlay instead
        extras = "--userns" if BINARY_NAME == "singularity" else "--writable-tmpfs"
        self._print("Starting container [{0:s}]. Timeout set to {1:d}. The container ID is printed below.".format(
            self.name, self.time_out))
        utils.xrun(f"{BINARY} run --workdir {self.execdir} --containall {extras}",
                   list(args) + [volumes, self.image, self.RUNSCRIPT],
                   log=self.logger, timeout=self.time_out, output_wrangler=output_wrangler,
                   env=self._env, logfile=self.logfile)
        self.status = "exited"
        return 0
    def _print(self, message):
        # Log through the attached logger when available, else stdout
        if self.logger:
            self.logger.info(message)
        else:
            print(message)
        return 0
| 4,511 | 30.333333 | 138 | py |
Stimela | Stimela-master/stimela/docker.py | # -*- coding: future_fstrings -*-
import subprocess
import os
import sys
from io import StringIO
from stimela import utils
import json
import stimela
import time
import datetime
import tempfile
class DockerError(Exception):
    """Exception type for docker-related failures."""
    pass
def build(image, build_path, tag=None, build_args=None, fromline=None, args=[]):
    """ build a docker image

    Copies build_path into a scratch directory and runs 'docker build'
    there. When build_args is given, the Dockerfile is rewritten: fromline
    (if set) replaces the FROM line, and the build_args lines are inserted
    just before CMD (or before the last line). 'args' is extra docker CLI
    arguments; the mutable default is safe because it is never mutated.
    """
    if tag:
        image = ":".join([image, tag])
    bdir = tempfile.mkdtemp()
    # NOTE(review): shell copy/remove below breaks on paths with spaces
    os.system('cp -r {0:s}/* {1:s}'.format(build_path, bdir))
    if build_args:
        # Rewrite the Dockerfile into a temp file inside the build context
        stdw = tempfile.NamedTemporaryFile(dir=bdir, mode='w')
        with open("{}/Dockerfile".format(bdir)) as std:
            dfile = std.readlines()
        for line in dfile:
            if fromline and line.lower().startswith('from'):
                stdw.write('FROM {:s}\n'.format(fromline))
            elif line.lower().startswith("cmd") or line == dfile[-1]:
                # Inject the extra build arguments before CMD / end of file
                for arg in build_args:
                    stdw.write(arg+"\n")
                stdw.write(line)
            else:
                stdw.write(line)
        stdw.flush()
        utils.xrun("docker build", args+["--force-rm", "-f", stdw.name,
                                         "-t", image,
                                         bdir])
        stdw.close()
    else:
        utils.xrun("docker build", args+["--force-rm", "-t", image,
                                         bdir])
    os.system('rm -rf {:s}'.format(bdir))
def pull(image, tag=None, force=False):
    """
    Pull a docker image, optionally at a specific tag.
    NOTE(review): 'force' is accepted for API symmetry but unused here.
    """
    target = ":".join([image, tag]) if tag else image
    utils.xrun("docker", ["pull", target])
def seconds_hms(seconds):
    """Render a duration given in seconds as an H:MM:SS string."""
    delta = datetime.timedelta(seconds=seconds)
    return str(delta)
class Container(object):
    def __init__(self, image, name,
                 volumes=None, environs=None,
                 label="", logger=None,
                 time_out=-1,
                 workdir=None,
                 log_container=None,
                 cabname=None,
                 runscript=None):
        """
        Python wrapper to docker engine tools for managing containers.

        NOTE(review): 'log_container' is accepted but never stored or used.
        """
        self.image = image
        self.name = name
        # NOTE(review): attribute name is misspelled ('cabnane'); external
        # code may rely on it, so it is left unchanged here.
        self.cabnane = cabname
        self.label = label
        self.volumes = volumes or []
        self.environs = environs or []
        self.logger = logger
        self.status = None
        self.WORKDIR = workdir
        self.RUNSCRIPT = runscript
        self.PID = os.getpid()
        self.uptime = "00:00:00"
        self.time_out = time_out
    def add_volume(self, host, container, perm="rw", noverify=False):
        """
        Register a host->container bind mount. The host path must exist
        unless noverify is set.
        """
        if os.path.exists(host) or noverify:
            if self.logger:
                self.logger.debug("Mounting volume [{0}] in container [{1}] at [{2}]".format(
                    host, self.name, container))
            host = os.path.abspath(host)
        else:
            raise IOError(
                "Directory {0} cannot be mounted on container: File doesn't exist".format(host))
        self.volumes.append(":".join([host, container, perm]))
    def add_environ(self, key, value):
        """Record a KEY=VALUE environment variable for the container."""
        if self.logger:
            self.logger.debug("Adding environ varaible [{0}={1}] in container {2}".format(
                key, value, self.name))
        self.environs.append("=".join([key, value]))
def create(self, *args):
if self.volumes:
volumes = " -v " + " -v ".join(self.volumes)
else:
volumes = ""
if self.environs:
environs = environs = " -e "+" -e ".join(self.environs)
else:
environs = ""
self._print(
"Instantiating container [{}]. The container ID is printed below.".format(self.name))
utils.xrun("docker create", list(args) + [volumes, environs, "--rm",
"-w %s" % (self.WORKDIR),
"--name", self.name,
self.image,
self.RUNSCRIPT or ""], log=self.logger)
self.status = "created"
def info(self):
output = subprocess.check_output(
"docker inspect {}".format(self.name), shell=True).decode()
output_file = StringIO(output[3:-3])
jdict = json.load(output_file)
output_file.close()
return jdict
def get_log(self):
stdout = open(self.logfile, 'w')
exit_status = subprocess.call("docker logs {0}".format(self.name),
stdout=stdout, stderr=stdout, shell=True)
if exit_status != 0:
self.logger.warn(
'Could not log container: {}. Something went wrong durring execution'.format(self.name))
output = 'Task was not started.'
stdout.write(output)
else:
output = stdout.read()
stdout.close()
return output
def start(self, output_wrangler=None):
running = True
tstart = time.time()
self.status = "running"
self._print("Starting container [{0:s}]. Timeout set to {1:d}. The container ID is printed below.".format(
self.name, self.time_out))
utils.xrun("docker", ["start", "-a", self.name],
timeout=self.time_out,
logfile=self.logfile,
log=self.logger, output_wrangler=output_wrangler,
kill_callback=lambda: utils.xrun("docker", ["kill", self.name]))
uptime = seconds_hms(time.time() - tstart)
self.uptime = uptime
self._print(
"Container [{0}] has executed successfully".format(self.name))
self._print("Runtime was {0}.".format(uptime))
self.status = "exited"
def stop(self):
dinfo = self.info()
status = dinfo["State"]["Status"]
killed = False
if status in ["running", "paused"]:
try:
utils.xrun("docker stop", [self.name])
except KeyboardInterrupt("Received terminate signal. Will stop and remove container first"):
killed = True
self.status = 'exited'
self._print("Container {} has been stopped.".format(self.name))
if killed:
self.remove()
raise KeyboardInterrupt
def image_exists(self):
"""
Check if image exists
"""
image_ids = subprocess.check_output(f"docker images -q {self.image}".split())
if image_ids:
return True
else:
return False
def remove(self):
dinfo = self.info()
status = dinfo["State"]["Status"]
killed = False
if status == "exited":
try:
utils.xrun("docker rm", [self.name])
except KeyboardInterrupt:
killed = True
if killed:
raise KeyboardInterrupt
else:
raise DockerError(
"Container [{}] has not been stopped, cannot remove".format(self.name))
def _print(self, message):
if self.logger:
self.logger.info(message)
else:
print(message)
| 7,200 | 31.147321 | 114 | py |
Stimela | Stimela-master/stimela/podman.py | # -*- coding: future_fstrings -*-
import subprocess
import os
import sys
from io import StringIO
from stimela import utils
import json
import stimela
import time
import datetime
import tempfile
class DockerError(Exception):
    """Raised when a container operation is attempted in an invalid state."""
def build(image, build_path, tag=None, build_args=None, fromline=None, args=[]):
    """ build a podman image

    image:      image name (tag appended as image:tag when given)
    build_path: directory containing the Dockerfile and build context
    build_args: optional list of Dockerfile directives injected before the
                CMD line (or before the last line if there is no CMD)
    fromline:   optional replacement for the Dockerfile FROM line
    args:       extra arguments passed through to `podman build`
                (default list is never mutated, so the shared default is safe)
    """
    if tag:
        image = ":".join([image, tag])
    # work on a scratch copy of the build context so the Dockerfile can be rewritten
    bdir = tempfile.mkdtemp()
    os.system('cp -r {0:s}/* {1:s}'.format(build_path, bdir))
    if build_args:
        stdw = tempfile.NamedTemporaryFile(dir=bdir, mode='w')
        with open("{}/Dockerfile".format(bdir)) as std:
            dfile = std.readlines()
            for line in dfile:
                if fromline and line.lower().startswith('from'):
                    stdw.write('FROM {:s}\n'.format(fromline))
                # inject build_args before CMD, or before the last line when
                # the Dockerfile has no CMD. The fallback matches the docker
                # backend; previously build_args were silently dropped for
                # Dockerfiles without a CMD line.
                elif line.lower().startswith("cmd") or line == dfile[-1]:
                    for arg in build_args:
                        stdw.write(arg+"\n")
                    stdw.write(line)
                else:
                    stdw.write(line)
        stdw.flush()
        utils.xrun("podman build", args+["--force-rm", "--no-cache", "-f", stdw.name,
                                         "-t", image,
                                         bdir])
        stdw.close()
    else:
        utils.xrun("podman build", args+["--force-rm", "--no-cache", "-t", image,
                                         bdir])
    os.system('rm -rf {:s}'.format(bdir))
def pull(image, tag=None, force=False):
    """ pull a podman image """
    target = "%s:%s" % (image, tag) if tag else image
    utils.xrun("podman", ["pull", "docker.io/"+target])
def seconds_hms(seconds):
    """Render a duration in seconds as ``[D day[s], ]H:MM:SS[.ffffff]``."""
    return "{}".format(datetime.timedelta(seconds=seconds))
class Container(object):
    """Python wrapper around the podman CLI for managing a single container."""

    def __init__(self, image, name,
                 volumes=None, environs=None,
                 label="", logger=None,
                 shared_memory="1gb",
                 time_out=-1,
                 log_container=None,
                 workdir=None,
                 runscript=None):
        """
        Python wrapper to podman engine tools for managing containers.

        image:         image to instantiate
        name:          name to give the container
        volumes:       initial list of "host:container[:perm]" volume specs
        environs:      initial list of "KEY=VALUE" environment specs
        label:         free-form label used for logging
        logger:        logger object; falls back to print() when None
        shared_memory: value passed to --shm-size
        time_out:      execution timeout in seconds (-1 means no timeout)
        workdir:       working directory inside the container
        runscript:     command executed when the container starts
        """
        self.image = image
        self.name = name
        self.label = label
        self.volumes = volumes or []
        self.environs = environs or []
        self.logger = logger
        self.status = None
        self.WORKDIR = workdir
        self.RUNSCRIPT = runscript
        self.shared_memory = shared_memory
        self.PID = os.getpid()
        self.uptime = "00:00:00"
        self.time_out = time_out
        # NOTE: self.logfile is assigned externally (by the recipe) before
        # get_log()/start() are called.

    def add_volume(self, host, container, perm="rw", noverify=False):
        """Mount `host` at `container` with permissions `perm`.

        Raises IOError if `host` does not exist, unless noverify is set.
        """
        if os.path.exists(host) or noverify:
            if self.logger:
                self.logger.debug("Mounting volume [{0}] in container [{1}] at [{2}]".format(
                    host, self.name, container))
            host = os.path.abspath(host)
        else:
            raise IOError(
                "Directory {0} cannot be mounted on container: File doesn't exist".format(host))
        self.volumes.append(":".join([host, container, perm]))

    def add_environ(self, key, value):
        """Add environment variable key=value to the container."""
        if self.logger:
            self.logger.debug("Adding environ varaible [{0}={1}] in container {2}".format(
                key, value, self.name))
        self.environs.append("=".join([key, value]))

    def create(self, *args):
        """Create (but do not start) the container via ``podman create``."""
        volumes = " -v " + " -v ".join(self.volumes) if self.volumes else ""
        environs = " -e " + " -e ".join(self.environs) if self.environs else ""
        # non-root podman users cannot allocate resources on all linux kernel
        # setups, so strip those options out. Build a filtered copy instead of
        # calling remove() on the list being iterated (the original could skip
        # an element immediately following a removed one).
        args = [arg for arg in args
                if not arg.startswith(("--memory", "--cpus", "--user"))]
        self._print(
            "Instantiating container [{}]. The container ID is printed below.".format(self.name))
        utils.xrun("podman create", args + [volumes, environs, "--rm",
                                            "-w %s" % (self.WORKDIR) if self.WORKDIR else "",
                                            "--name", self.name, "--shm-size", self.shared_memory,
                                            self.image,
                                            self.RUNSCRIPT or ""], log=self.logger)
        self.status = "created"

    def info(self):
        """Return the container's ``podman inspect`` metadata as a dict."""
        output = subprocess.check_output(
            "podman inspect {}".format(self.name), shell=True).decode()
        # strip the enclosing "[\n ... \n]" so a single JSON object remains
        output_file = StringIO(output[3:-3])
        jdict = json.load(output_file)
        output_file.close()
        return jdict

    def get_log(self):
        """Dump the container's logs into self.logfile and return the text."""
        with open(self.logfile, 'w') as stdout:
            exit_status = subprocess.call("podman logs {0}".format(self.name),
                                          stdout=stdout, stderr=stdout, shell=True)
            if exit_status != 0:
                self.logger.warning(
                    'Could not log container: {}. Something went wrong durring execution'.format(self.name))
                output = 'Task was not started.'
                stdout.write(output)
                return output
        # The original implementation called stdout.read() on the write-only
        # handle, which raises io.UnsupportedOperation; re-open for reading.
        with open(self.logfile) as logf:
            return logf.read()

    def start(self, output_wrangler=None):
        """Start the created container and block until it exits (or times out)."""
        running = True
        tstart = time.time()
        self.status = "running"
        self._print("Starting container [{0:s}]. Timeout set to {1:d}. The container ID is printed below.".format(
            self.name, self.time_out))
        utils.xrun("podman", ["start", "-a", self.name],
                   timeout=self.time_out,
                   kill_callback=lambda: utils.xrun("podman", ["kill", self.name]),
                   logfile=self.logfile, output_wrangler=output_wrangler,
                   log=self.logger)
        uptime = seconds_hms(time.time() - tstart)
        self.uptime = uptime
        self._print(
            "Container [{0}] has executed successfully".format(self.name))
        self._print("Runtime was {0}.".format(uptime))
        self.status = "exited"

    def stop(self):
        """Stop the container if running/paused; remove it if interrupted."""
        dinfo = self.info()
        status = dinfo["State"]["Status"]
        killed = False
        if status in ["running", "paused"]:
            try:
                utils.xrun("podman stop", [self.name])
            # The original caught an exception *instance*
            # (except KeyboardInterrupt("...")), which raises a TypeError
            # when a KeyboardInterrupt actually occurs; catch the class.
            except KeyboardInterrupt:
                self._print(
                    "Received terminate signal. Will stop and remove container first")
                killed = True
        self.status = 'exited'
        self._print("Container {} has been stopped.".format(self.name))
        if killed:
            self.remove()
            raise KeyboardInterrupt

    def image_exists(self):
        """
        Check if image exists (added for parity with the docker backend).
        """
        image_ids = subprocess.check_output(f"podman images -q {self.image}".split())
        return bool(image_ids)

    def remove(self):
        """Remove an exited container; raise DockerError if it is still up."""
        dinfo = self.info()
        status = dinfo["State"]["Status"]
        killed = False
        if status == "exited":
            try:
                utils.xrun("podman rm", [self.name])
            except KeyboardInterrupt:
                killed = True
            if killed:
                raise KeyboardInterrupt
        else:
            raise DockerError(
                "Container [{}] has not been stopped, cannot remove".format(self.name))

    def _print(self, message):
        # route through the logger when one is attached, else plain stdout
        if self.logger:
            self.logger.info(message)
        else:
            print(message)
| 7,316 | 31.52 | 114 | py |
Stimela | Stimela-master/stimela/__init__.py | # -*- coding: future_fstrings -*-
import os
import sys
import inspect
import pkg_resources
import logging
from logging import StreamHandler
import re
from pathlib import Path
import getpass
import time
try:
    __version__ = pkg_resources.require("stimela")[0].version
except pkg_resources.DistributionNotFound:
    # running from a source checkout that was never pip-installed
    __version__ = "dev"
# Get to know user
try:
    USER = getpass.getuser()
# was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
# narrowed so interrupts still propagate
except Exception:
    # Fall back to a pseudo-random id so containers started by different
    # (unresolvable) users do not clash on name.
    USER = hex(id(time.ctime()))
CAB_USERNAME = re.sub('[^0-9a-zA-Z]+', '_', USER).lower()
root = os.path.dirname(__file__)
CAB_PATH = os.path.join(root, "cargo/cab")
BASE_PATH = os.path.join(root, "cargo/base")
# Extra globals injected into a recipe's namespace by register_globals()
# (was built via a one-key dict literal followed by `del`; a plain empty
# dict is equivalent and clearer).
GLOBALS = {}
def register_globals():
    """Copy the contents of GLOBALS into the calling module's namespace."""
    caller = inspect.currentframe().f_back
    caller.f_globals.update(GLOBALS)
# Get base images
# All base images must be on dockerhub
BASE = os.listdir(BASE_PATH)
# Discover valid cab packages: a cab directory must contain both a
# parameters.json file and a src/ subdirectory.
CAB = list()
for item in os.listdir(CAB_PATH):
    try:
        # These files must exist for a cab image to be valid
        ls_cabdir = os.listdir('{0}/{1}'.format(CAB_PATH, item))
        paramfile = 'parameters.json' in ls_cabdir
        srcdir = 'src' in ls_cabdir
    except OSError:
        # not a directory (or unreadable) -- skip it
        continue
    if paramfile and srcdir:
        CAB.append(item)
# module-level singleton logger, created lazily by logger()
_logger = None
from .utils.logger import SelectiveFormatter, ColorizingFormatter, ConsoleColors, MultiplexingHandler
# handlers/formatters for the global logger; populated on the first logger() call
log_console_handler = log_formatter = log_boring_formatter = log_colourful_formatter = None
def is_logger_initialized():
    """Report whether the global Stimela logger has already been created."""
    initialized = _logger is not None
    return initialized
def logger(name="STIMELA", propagate=False, console=True, boring=False,
           fmt="{asctime} {name} {levelname}: {message}",
           col_fmt="{asctime} {name} %s{levelname}: {message}%s"%(ConsoleColors.BEGIN, ConsoleColors.END),
           sub_fmt="# {message}",
           col_sub_fmt="%s# {message}%s"%(ConsoleColors.BEGIN, ConsoleColors.END),
           datefmt="%Y-%m-%d %H:%M:%S", loglevel="INFO"):
    """Returns the global Stimela logger (initializing if not already done so, with the given values)

    The *fmt/col_fmt* pair formats ordinary records; *sub_fmt/col_sub_fmt*
    formats records originating from cab subprocess output. Later calls
    return the already-initialized logger and ignore all arguments.
    """
    global _logger
    if _logger is None:
        _logger = logging.getLogger(name)
        _logger.setLevel(getattr(logging, loglevel))
        _logger.propagate = propagate
        global log_console_handler, log_formatter, log_boring_formatter, log_colourful_formatter

        # this function checks if the log record corresponds to stdout/stderr output from a cab
        def _is_from_subprocess(rec):
            return hasattr(rec, 'stimela_subprocess_output')

        # plain (no-colour) formatter, with a separate style for cab output
        log_boring_formatter = SelectiveFormatter(
            logging.Formatter(fmt, datefmt, style="{"),
            [(_is_from_subprocess, logging.Formatter(sub_fmt, datefmt, style="{"))])

        # colourized formatter; cab output is rendered dimmed
        log_colourful_formatter = SelectiveFormatter(
            ColorizingFormatter(col_fmt, datefmt, style="{"),
            [(_is_from_subprocess, ColorizingFormatter(fmt=col_sub_fmt, datefmt=datefmt, style="{",
                                                       default_color=ConsoleColors.DIM))])

        log_formatter = log_boring_formatter if boring else log_colourful_formatter

        if console:
            # SILENT_STDERR=ON routes everything to stdout via a plain handler
            if "SILENT_STDERR" in os.environ and os.environ["SILENT_STDERR"].upper()=="ON":
                log_console_handler = StreamHandler(stream=sys.stdout)
            else:
                log_console_handler = MultiplexingHandler()

            log_console_handler.setFormatter(log_formatter)
            log_console_handler.setLevel(getattr(logging, loglevel))
            _logger.addHandler(log_console_handler)

    return _logger
from stimela.recipe import Recipe
| 3,703 | 33.296296 | 107 | py |
Stimela | Stimela-master/stimela/dismissable.py | # -*- coding: future_fstrings -*-
class dismissable:
    """
    Wrapper for optional parameters to stimela.

    Initialise with val == None to force stimela to skip
    parsing the parameter. Calling the instance returns the
    wrapped value.
    """

    def __init__(self, val=None):
        self.__value = val

    def __call__(self):
        return self.__value
| 307 | 21 | 56 | py |
Stimela | Stimela-master/stimela/recipe.py | # -*- coding: future_fstrings -*-
import os
import sys
import pwd, grp
import time
import stimela
from stimela import docker, singularity, utils, cargo, podman, main
from stimela.cargo import cab
import logging
import inspect
import re
from stimela.exceptions import *
from stimela.dismissable import dismissable
from stimela.cargo.cab import StimelaCabParameterError
from datetime import datetime
import traceback
import shutil
import re
# version string reported in per-task log headers
version = stimela.__version__
# run containers as the invoking user so output files are owned by them
UID = os.getuid()
GID = os.getgid()
CAB_PATH = os.path.abspath(os.path.dirname(cab.__file__))
# maps a job-type name to the backend module implementing it
CONT_MOD = {
    "docker" : docker,
    "singularity" : singularity,
    "podman" : podman
}
CONT_IO = cab.IODEST
CDIR = os.getcwd()
PULLFOLDER = os.environ.get("STIMELA_PULLFOLDER", CDIR)
# make dictionary of wrangler actions. First, add all logging levels
_actions = {attr: value for attr, value in logging.__dict__.items() if attr.upper() == attr and type(value) is int}
# then add constants for other wrangler actions
_SUPPRESS = _actions["SUPPRESS"] = "SUPPRESS"
# fixed: the sentinel value previously read "DECLARE_SUPPRESS" -- a
# copy-paste typo. It is only ever compared by identity (`action is
# _DECLARE_SUCCESS`), so correcting the string is behaviour-safe.
_DECLARE_SUCCESS = _actions["DECLARE_SUCCESS"] = "DECLARE_SUCCESS"
_DECLARE_FAILURE = _actions["DECLARE_FAILURE"] = "DECLARE_FAILURE"
class StimelaJob(object):
    """A single step of a recipe: either a containerised cab or a Python callable."""

    # class-level registry of loggers already created, keyed by log name,
    # so jobs with the same name share one logger
    logs_avail = dict()

    def __init__(self, name, recipe, label=None,
                 jtype='docker', cpus=None, memory_limit=None,
                 singularity_dir=None,
                 time_out=-1,
                 logger=None,
                 logfile=None,
                 cabpath=None,
                 workdir=None,
                 tag=None,
                 version=None,
                 force_tag=False,
                 shared_memory=None):
        """
        logger: if set to a logger object, uses the specified logger.
                if None, sets up its own logger using the parameters below
        logfile: name of logfile, False to disable recipe-level logfiles, or None to form a default name
        jtype: one of 'docker', 'singularity', 'podman', or 'python'
        cpus/memory_limit/shared_memory: resource limits passed to the container engine
        tag/version/force_tag: base-image selection controls (see setup_job)
        """
        self.tag = tag
        self.version = version
        self.force_tag = force_tag
        self.name = name
        self.recipe = recipe
        self.label = label or '{0}({1})'.format(name, id(name))
        self.log = recipe.log
        self._log_fh = None
        self.active = False
        self.jtype = jtype  # ['docker', 'python', singularity']
        self.job = None
        self.created = False
        self.wranglers = []
        # container engine options; always run as the invoking user
        self.args = ['--user {}:{}'.format(UID, GID)]
        if cpus:
            self.args.append("--cpus {0:f}".format(cpus))
        if memory_limit:
            self.args.append("--memory {0:s}".format(memory_limit))
        if shared_memory:
            self.args.append("--shm-size {0:s}".format(shared_memory))
        self.time_out = time_out
        self.logfile = logfile
        if self.logfile is not False:
            self.logfile = logfile or "log-{0:s}.txt".format(self.name)
        self.cabpath = cabpath
        self.workdir = workdir

    def setup_job_log(self, log_name=None, loglevel=None):
        """ set up a log for the job on the host side
            log_name: preferably unique name for this jobs log
            log_dir: log base directory, None is current directory
        """
        loglevel = loglevel or self.recipe.loglevel
        log_name = log_name or self.name
        if log_name not in StimelaJob.logs_avail:
            self.log = stimela.logger().getChild(log_name)
            if self.logfile is not False:
                log_dir = os.path.dirname(self.logfile) or "."
                if not os.path.exists(log_dir):
                    os.mkdir(log_dir)
                self._log_fh = logging.FileHandler(self.logfile, 'w', delay=True)
                self._log_fh.setLevel(getattr(logging, loglevel))
                self.log.addHandler(self._log_fh)
            self.log.propagate = True            # propagate also to main stimela logger
            StimelaJob.logs_avail[log_name] = self.log
        else:
            self.log = StimelaJob.logs_avail[log_name]

    def setup_output_wranglers(self, wranglers):
        """Compile the cab's output wranglers into (regex, replace, actions) triples.

        wranglers maps a regex string to an action (or list of actions):
        a logging level name, SUPPRESS, DECLARE_SUCCESS, DECLARE_FAILURE,
        or "replace:<text>".
        """
        self._wranglers = []
        if not wranglers:
            return
        if type(wranglers) is not dict:
            raise utils.StimelaCabRuntimeError("wranglers: dict expected")
        for match, actions in wranglers.items():
            replace = None
            if type(actions) is str:
                actions = [actions]
            if type(actions) is not list:
                raise utils.StimelaCabRuntimeError(f"wrangler entry {match}: expected action or list of action")
            for action in actions:
                if action.startswith("replace:"):
                    replace = action.split(":", 1)[1]
                elif action not in _actions:
                    raise utils.StimelaCabRuntimeError(f"wrangler entry {match}: unknown action '{action}'")
            actions = [_actions[act] for act in actions if act in _actions]
            self._wranglers.append((re.compile(match), replace, actions))

    def apply_output_wranglers(self, output, severity, logger):
        """Apply compiled wranglers to one line of container output.

        Returns (modified_output, severity); (None, 0) means suppress the line.
        May set self.declare_status as a side effect.
        """
        suppress = False
        modified_output = output
        for regex, replace, actions in self._wranglers:
            if regex.search(output):
                if replace is not None:
                    modified_output = regex.sub(replace, output)
                for action in actions:
                    if type(action) is int:
                        # plain logging level override
                        severity = action
                    elif action is _SUPPRESS:
                        suppress = True
                    elif action is _DECLARE_FAILURE and self.declare_status is None:
                        self.declare_status = False
                        modified_output = "[FAILURE] " + modified_output
                        severity = logging.ERROR
                    elif action is _DECLARE_SUCCESS and self.declare_status is None:
                        self.declare_status = True
                        modified_output = "[SUCCESS] " + modified_output
        return (None, 0) if suppress else (modified_output, severity)

    def setup_job(self, image, config,
                  indir=None, outdir=None, msdir=None,
                  singularity_image_dir=None, repository=None):
        """
        Setup job
        image : stimela cab name, e.g. 'cab/simms', or a Python callable
        name : This name will be part of the name of the contaier that will
               execute the task (now optional)
        config : Dictionary of options to parse to the task. This will modify
                 the parameters in the default parameter file which
                 can be viewd by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
        indir : input dirctory for cab
        outdir : output directory for cab
        msdir : MS directory for cab. Only specify if different from recipe ms_dir
        singularity_image_dir : where singularity images are cached/pulled
        repository : default image registry (overridden by the cab's "hub" key)
        """
        # python jobs need no container setup -- just record the callable
        if self.jtype == "python":
            self.image = image.__name__
            if not callable(image):
                raise utils.StimelaCabRuntimeError(
                    'Object given as function is not callable')
            if self.name is None:
                self.name = image.__name__
            self.job = {
                'function': image,
                'parameters': config,
            }
            self.setup_job_log()
            return 0
        # check if name has any offending characters
        offenders = re.findall('[^\w .-]', self.name)
        if offenders:
            raise StimelaCabParameterError('The cab name \'{:s}\' contains invalid characters.'
                                           ' Allowed charcaters are alphanumeric, plus [-_. ].'.format(self.name))
        self.setup_job_log()
        # make name palatable as container name
        pausterized_name = re.sub("[\W]", "_", self.name)
        name = '{0}-{1}{2}'.format(pausterized_name, id(image),
                                   str(time.time()).replace('.', ''))
        # instantiate the backend-specific Container wrapper
        cont = getattr(CONT_MOD[self.jtype], "Container")(image, name,
                                                          logger=self.log,
                                                          workdir=CONT_IO["output"],
                                                          time_out=self.time_out)
        cabpath = os.path.join(CAB_PATH, image.split("/")[1])
        # In case the user specified a custom cab
        cabpath = os.path.join(self.cabpath, image.split("/")[1]) if self.cabpath else cabpath
        parameter_file = os.path.join(cabpath, 'parameters.json')
        _cab = cab.CabDefinition(indir=indir, outdir=outdir,
                                 msdir=msdir, parameter_file=parameter_file)
        param = utils.readJson(parameter_file)
        # the cab's own "hub" key overrides the recipe-level repository
        _repository = param.get("hub", repository)
        self.setup_output_wranglers(_cab.wranglers)
        cont.IODEST = CONT_IO
        cont.cabname = _cab.task
        #
        # Resolve the (tag, version) pair for the base image.
        #Example
        # ----------------
        #  casa_listobs:
        #    tag: <tag> ## optional
        #    version: <version>  ## optional. If version is a dict, then ignore tag and priority and use <tag>:<version> pairs in dict
        #    force: true ## Continue even if tag is specified in the parameters.json file
        no_tag_version = False  # NOTE(review): appears unused
        if self.tag or self.version:
            tvi = None
            if self.tag:
                try:
                    tvi = _cab.tag.index(self.tag)
                except ValueError:
                    pass
            elif self.version:
                try:
                    tvi = _cab.version.index(self.version)
                except ValueError:
                    self.log.error(f"The version, {self.version}, specified for cab '{_cab.task}' is unknown. Available versions are {_cab.version}")
                    raise ValueError
            # fall back to the latest entry when the requested tag is unknown
            if tvi is None:
                tvi = -1
            self.tag = _cab.tag[tvi]
            self.version = _cab.version[tvi]
        else:
            # default to the latest tag/version listed by the cab
            self.tag = _cab.tag[-1]
            self.version = _cab.version[-1]
        # recipe-level cabspecs override the per-job choice
        cabspecs = self.recipe.cabspecs
        if cabspecs:
            _tag = cabspecs.get("tag", None)
            _version = cabspecs.get("version", None)
            _force_tag = cabspecs.get("force", False)
            if isinstance(_version, dict):
                if self.version in _version:
                    self.tag = _version[self.version]
            elif _version:
                self.version = _version
            else:
                self.tag = _tag
            if self.version and self.version not in _cab.version:
                self.log.error(f"The version, {self.version}, specified for cab '{_cab.task}' is unknown. Available versions are {_cab.version}")
                raise ValueError
            if not _tag:
                idx = _cab.version.index(self.version)
                self.tag = _cab.tag[idx]
            self.force_tag = _force_tag
        if self.tag not in _cab.tag:
            if self.force_tag:
                self.log.warn(f"You have chosen to use an unverified base image '{_cab.base}:{self.tag}'. May the force be with you.")
            else:
                raise StimelaBaseImageError(f"The base image '{_cab.base}' with tag '{self.tag}' has not been verified. If you wish to continue with it, please add the 'force_tag' when adding it to your recipe")
        # docker-hub images are referenced without a registry prefix
        if _repository:
            image_url = f"{_repository}/{_cab.base}:{self.tag}" if _repository != "docker" and _repository != "docker.io" else \
                        f"{_cab.base}:{self.tag}"
        else:
            image_url = f"{_cab.base}:{self.tag}"
        if self.jtype == "singularity":
            # singularity images live as local files; pull only when missing
            simage = _cab.base.replace("/", "_")
            cont.image = '{0:s}/{1:s}_{2:s}{3:s}'.format(singularity_image_dir,
                                                         simage, self.tag, singularity.suffix)
            cont.image = os.path.abspath(cont.image)
            if not os.path.exists(cont.image):
                singularity.pull(image_url,
                                 os.path.basename(cont.image), directory=singularity_image_dir)
        else:
            cont.image = image_url
        # Container parameter file will be updated and validated before the container is executed
        cont._cab = _cab
        cont.parameter_file_name = '{0}/{1}.json'.format(
            self.recipe.parameter_file_dir, name)
        self.image = str(cont.image)
        # Remove dismissable kw arguments:
        ops_to_pop = []
        for op in config:
            if isinstance(config[op], dismissable):
                ops_to_pop.append(op)
        for op in ops_to_pop:
            arg = config.pop(op)()
            if arg is not None:
                config[op] = arg
        cont.config = config
        # These are standard volumes and
        # environmental variables. These will be
        # always exist in a cab container
        cont.add_volume(cont.parameter_file_name,
                        f'{cab.MOUNT}/configfile', perm='ro', noverify=True)
        cont.add_volume(os.path.join(cabpath, "src"), f"{cab.MOUNT}/code", "ro")
        # mount host user identity so files created in the container are owned correctly
        cont.add_volume(os.path.join(self.workdir, "passwd"), "/etc/passwd")
        cont.add_volume(os.path.join(self.workdir, "group"), "/etc/group")
        cont.RUNSCRIPT = f"/{self.jtype}_run"
        if self.jtype == "singularity":
            cont.RUNSCRIPT = f"/{self.jtype}"
            # CASA-based images need an explicit UTF-8 locale
            if _cab.base.startswith("stimela/casa") or _cab.base.startswith("stimela/simms"):
                cont.add_environ("LANGUAGE", "en_US.UTF-8")
                cont.add_environ("LANG", "en_US.UTF-8")
                cont.add_environ("LC_ALL", "en_US.UTF-8")
            cont.execdir = self.workdir
        else:
            cont.RUNSCRIPT = f"/{self.jtype}_run"
            cont.add_environ('HOME', cont.IODEST["output"])
        # the host-side runscript is mounted into the container as its entry point
        runscript = shutil.which("stimela_runscript")
        if runscript:
            cont.add_volume(runscript,
                            cont.RUNSCRIPT, perm="ro")
        else:
            self.log.error("Stimela container runscript could not found.\
                This may due to conflicting python or stimela installations in your $PATH.")
            raise OSError
        cont.add_environ('CONFIG', f'{cab.MOUNT}/configfile')
        cont.add_environ('STIMELA_MOUNT', cab.MOUNT)
        if msdir:
            md = cont.IODEST["msfile"]
            os.makedirs(msdir, exist_ok=True)
            cont.add_volume(msdir, md)
            cont.add_environ("MSDIR", md)
            # Keep a record of the content of the
            # volume
            dirname, dirs, files = [a for a in next(os.walk(msdir))]
            cont.msdir_content = {
                "volume": dirname,
                "dirs": dirs,
                "files": files,
            }
            self.log.debug(
                'Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
        if indir:
            cont.add_volume(indir, cont.IODEST["input"], perm='ro')
            cont.add_environ("INPUT", cont.IODEST["input"])
            # Keep a record of the content of the
            # volume
            dirname, dirs, files = [a for a in next(os.walk(indir))]
            cont.input_content = {
                "volume": dirname,
                "dirs": dirs,
                "files": files,
            }
            self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(
                indir, cont.IODEST["input"]))
        os.makedirs(outdir, exist_ok=True)
        od = cont.IODEST["output"]
        cont.logfile = self.logfile
        cont.add_volume(outdir, od, "rw")
        cont.add_environ("OUTPUT", od)
        # temp files go into output
        tmpfol = os.path.join(outdir, "tmp")
        if not os.path.exists(tmpfol):
            os.mkdir(tmpfol)
        cont.add_volume(tmpfol, cont.IODEST["tmp"], "rw")
        cont.add_environ("TMPDIR", cont.IODEST["tmp"])
        self.log.debug(
            'Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(outdir, od))
        # Added and ready for execution
        self.job = cont
        return 0

    def run_job(self):
        """Execute the prepared job (python callable or container). Returns 0."""
        # declare_status may be set by output wranglers during execution
        self.declare_status = None
        if isinstance(self.job, dict):
            # python job: call the function directly on the host
            function = self.job['function']
            options = self.job['parameters']
            function(**options)
            return 0
        if hasattr(self.job, '_cab'):
            # write/validate the container's parameter file before execution
            self.job._cab.update(self.job.config,
                                 self.job.parameter_file_name, tag=self.tag)
        if self.jtype == "singularity":
            self.created = True
            self.job.run(output_wrangler=self.apply_output_wranglers)
        elif self.jtype in ["podman", "docker"]:
            # create then start, so failures during creation are distinguishable
            self.created = False
            self.job.create(*self.args)
            self.created = True
            self.job.start(output_wrangler=self.apply_output_wranglers)
        return 0

    def close(self):
        """Call this to explicitly clean up after the job"""
        if self._log_fh is not None:
            self._log_fh.close()

    def __del__(self):
        self.close()
class Recipe(object):
    def __init__(self, name, data=None,
                 parameter_file_dir=None, ms_dir=None,
                 build_label=None,
                 singularity_image_dir=None, JOB_TYPE='docker',
                 cabpath=None,
                 logger=None,
                 msdir=None,
                 indir=None,
                 outdir=None,
                 log_dir=None, logfile=None, logfile_task=None,
                 cabspecs=None,
                 repository="quay.io",
                 loglevel="INFO"):
        """
        Deifine and manage a stimela recipe instance.
        name : Name of stimela recipe
        msdir : Path of MSs to be used during the execution of the recipe
        parameter_file_dir : Will store task specific parameter files here
        logger: if set to a logger object, uses the specified logger.
                if None, sets up its own logger using the parameters below
        loglevel: default logging level
        log_dir: default directory for logfiles
        logfile: name of logfile, False to disable recipe-level logfiles, or None to form a default name
        logfile_task: name of task-level logfile, False to disable task-level logfiles, or None to form a default name.
                      logfile_task may contain a "{task}" entry which will be substituted for a task name.
        indir/outdir: default input/output directories for all jobs
        JOB_TYPE: container backend ('docker', 'singularity' or 'podman')
        cabspecs: recipe-wide tag/version overrides applied to every cab
        repository: default image registry
        """
        self.name = name
        self.repository = repository
        self.name_ = re.sub(r'\W', '_', name)  # pausterized name
        # globals of the calling script; used to pick up _STIMELA_* overrides
        # injected by the stimela command line
        self.stimela_context = inspect.currentframe().f_back.f_globals
        self.stimela_path = os.path.dirname(docker.__file__)
        # Update I/O with values specified on command line
        self.indir = indir
        self.outdir = outdir
        self.msdir = self.ms_dir = msdir or ms_dir
        self.loglevel = self.stimela_context.get('_STIMELA_LOG_LEVEL', None) or loglevel
        self.JOB_TYPE = self.stimela_context.get('_STIMELA_JOB_TYPE', None) or JOB_TYPE
        self.cabpath = cabpath
        self.cabspecs = cabspecs or {}
        # set default name for task-level logfiles
        self.logfile_task = "{0}/log-{1}-{{task}}".format(log_dir or ".", self.name_) \
            if logfile_task is None else logfile_task
        self._log_fh = None
        if logger is not None:
            self.log = logger
        else:
            logger = stimela.logger(loglevel=self.loglevel)
            self.log = logger.getChild(name)
            self.log.propagate = True  # propagate to main stimela logger
        # logfile is False: no logfile at recipe level
        if logfile is not False:
            # logfile is None: use default name
            if logfile is None:
                logfile = "{0}/log-{1}.txt".format(log_dir or ".", self.name_)
            # reset default name for task-level logfiles based on logfile
            self.logfile_task = os.path.splitext(logfile)[0] + "-{task}.txt" \
                if logfile_task is None else logfile_task
            # ensure directory exists
            log_dir = os.path.dirname(logfile) or "."
            if not os.path.exists(log_dir):
                self.log.info('creating log directory {0:s}'.format(log_dir))
                os.makedirs(log_dir)
            self._log_fh = logging.FileHandler(logfile, 'w', delay=True)
            self._log_fh.setLevel(getattr(logging, self.loglevel))
            self._log_fh.setFormatter(stimela.log_formatter)
            self.log.addHandler(self._log_fh)
        self.resume_file = '.last_{}.json'.format(self.name_)
        # set to default if not set
        # create a folder to store config files
        # if it doesn't exist. These config
        # files can be resued to re-run the
        # task
        self.jobs = []
        self.completed = []
        self.failed = None
        self.remaining = []
        self.pid = os.getpid()
        # command-line pullfolder wins over the constructor argument
        cmd_line_pf = self.stimela_context.get('_STIMELA_PULLFOLDER', None)
        self.singularity_image_dir = cmd_line_pf or singularity_image_dir or PULLFOLDER
        if self.singularity_image_dir and not self.JOB_TYPE:
            self.JOB_TYPE = "singularity"
        self.log.info('---------------------------------')
        self.log.info('Stimela version {0}'.format(stimela.__version__))
        self.log.info('Running: {:s}'.format(self.name))
        self.log.info('---------------------------------')
        self.workdir = None
        self.__make_workdir()
        self.parameter_file_dir = parameter_file_dir or f'{self.workdir}/stimela_parameter_files'
        if not os.path.exists(self.parameter_file_dir):
            self.log.info(
                f'Config directory cannot be found. Will create {self.parameter_file_dir}')
            os.mkdir(self.parameter_file_dir)
    def __make_workdir(self):
        """Create a unique per-run working directory and the passwd/group
        files that get bind-mounted into containers so in-container files
        are owned by the invoking user."""
        timestamp = str(time.time()).replace(".", "")
        self.workdir = os.path.join(CDIR, f".stimela_workdir-{timestamp}")
        # regenerate the timestamp until the path is free
        while os.path.exists(self.workdir):
            timestamp = str(time.time()).replace(".", "")
            self.workdir = os.path.join(CDIR, f".stimela_workdir-{timestamp}")
        os.mkdir(self.workdir)
        # create passwd and group files to be mounted inside the container
        template_dir = os.path.join(os.path.dirname(__file__), "cargo/base")
        # get current user info
        pw = pwd.getpwuid(os.getuid())
        gr = grp.getgrgid(pw.pw_gid)
        with open(os.path.join(self.workdir, "passwd"), "wt") as file:
            file.write(open(os.path.join(template_dir, "passwd.template"), "rt").read())
            file.write(f"{pw.pw_name}:x:{pw.pw_uid}:{pw.pw_gid}:{pw.pw_gecos}:/:/bin/bash")
        with open(os.path.join(self.workdir, "group"), "wt") as file:
            file.write(open(os.path.join(template_dir, "group.template"), "rt").read())
            file.write(f"{gr.gr_name}:x:{gr.gr_gid}:")
    def add(self, image, name, config=None,
            input=None, output=None, msdir=None,
            label=None, shared_memory='1gb',
            build_label=None,
            cpus=None, memory_limit=None,
            time_out=-1,
            logger=None,
            logfile=None,
            cabpath=None,
            tag=None,
            version=None,
            force_tag=False):
        """Add a step to the recipe.

        image: cab name (e.g. 'cab/simms') or a Python callable
        name: task name; also used for the container and default logfile
        config: dict of parameters passed to the cab/function
        input/output/msdir: per-task I/O directories (fall back to
            command-line overrides, then recipe-level defaults)
        Returns 0.
        """
        if logfile is None:
            logfile = False if self.logfile_task is False else self.logfile_task.format(task=name)
        job = StimelaJob(name, recipe=self, label=label,
                         cpus=cpus, memory_limit=memory_limit,
                         shared_memory=shared_memory,
                         time_out=time_out,
                         jtype=self.JOB_TYPE,
                         logger=logger, logfile=logfile,
                         cabpath=cabpath or self.cabpath,
                         workdir=self.workdir, tag=tag, version=version, force_tag=force_tag)
        # a callable is executed on the host rather than in a container
        if callable(image):
            job.jtype = 'python'
        _indir = self.stimela_context.get('_STIMELA_INPUT', None) or input
        _outdir = self.stimela_context.get('_STIMELA_OUTPUT', None) or output
        _msdir = self.stimela_context.get('_STIMELA_MSDIR', None) or msdir
        # The hierarchy is command line, Recipe.add, and then Recipe
        indir = _indir or self.indir
        outdir = _outdir or self.outdir
        msdir = _msdir or self.msdir
        job.setup_job(image=image, config=config,
                      indir=indir, outdir=outdir, msdir=msdir,
                      singularity_image_dir=self.singularity_image_dir,
                      repository=self.repository)
        self.log.info(f'Adding cab \'{job.image}\' ({job.version}) to recipe, container name \'{name}\'')
        self.jobs.append(job)
        return 0
def log2recipe(self, job, recipe, num, status):
if job.jtype in ['docker', 'singularity', 'podman']:
cont = job.job
step = {
"name": cont.name,
"number": num,
"cab": cont.image,
"volumes": cont.volumes,
"environs": getattr(cont, "environs", None),
"shared_memory": getattr(cont, "shared_memory", None),
"input_content": cont.input_content,
"msdir_content": cont.msdir_content,
"label": getattr(cont, "label", ""),
"logfile": cont.logfile,
"status": status,
"jtype": job.jtype,
}
else:
step = {
"name": job.name,
"number": num,
"label": job.label,
"status": status,
"function": job.job['function'].__name__,
"jtype": 'function',
"parameters": job.job['parameters'],
}
recipe['steps'].append(step)
return 0
def run(self, steps=None, resume=False, redo=None):
"""
Run a Stimela recipe.
steps : recipe steps to run
resume : resume recipe from last run
redo : Re-run an old recipe from a .last file
"""
recipe = {
"name": self.name,
"steps": []
}
start_at = 0
if redo:
self.log.error("This feature has been depricated")
raise SystemExit
elif resume:
#TODO(sphe) Need to re-think how best to do this
self.log.error("This feature has been depricated")
raise SystemExit
if getattr(steps, '__iter__', False):
_steps = []
if isinstance(steps[0], str):
labels = [job.label.split('::')[0] for job in self.jobs]
for step in steps:
try:
_steps.append(labels.index(step)+1)
except ValueError:
raise StimelaCabParameterError(
'Recipe label ID [{0}] doesn\'t exist'.format(step))
steps = _steps
else:
steps = range(1, len(self.jobs)+1)
jobs = [(step, self.jobs[step-1]) for step in steps]
# TIMESTR = "%Y-%m-%d %H:%M:%S"
# TIMESTR = "%H:%M:%S"
for i, (step, job) in enumerate(jobs):
start_time = datetime.now()
job.log.info('job started at {}'.format(start_time),
# the extra attributes are filtered by e.g. the CARACal logger
extra=dict(stimela_job_state=(job.name, "running")))
self.log.info('STEP {0} :: {1}'.format(i+1, job.label))
self.active = job
try:
with open(job.logfile, 'a') as astd:
astd.write('\n-----------------------------------\n')
astd.write(
'Stimela version : {}\n'.format(version))
astd.write(
'Cab name : {}\n'.format(job.image))
astd.write('-------------------------------------\n')
job.run_job()
# raise exception if wranglers declared the job a failure
if job.declare_status is False:
raise StimelaRecipeExecutionError("job declared as failed")
self.log2recipe(job, recipe, step, 'completed')
self.completed.append(job)
finished_time = datetime.now()
job.log.info('job complete at {} after {}'.format(finished_time, finished_time-start_time),
# the extra attributes are filtered by e.g. the CARACal logger
extra=dict(stimela_job_state=(job.name, "complete")))
except (utils.StimelaCabRuntimeError,
StimelaRecipeExecutionError,
StimelaCabParameterError) as exc:
# ignore exceptions if wranglers declared the job a success
if job.declare_status is True:
finished_time = datetime.now()
job.log.info('job complete (declared successful) at {} after {}'.format(finished_time, finished_time - start_time),
# the extra attributes are filtered by e.g. the CARACal logger
extra=dict(stimela_job_state=(job.name, "complete")))
continue
self.remaining = [jb[1] for jb in jobs[i+1:]]
self.failed = job
finished_time = datetime.now()
job.log.error(str(exc), extra=dict(stimela_job_state=(job.name, "failed"), boldface=True))
job.log.error('job failed at {} after {}'.format(finished_time, finished_time-start_time),
extra=dict(stimela_job_state=(job.name, "failed"), color=None))
for line in traceback.format_exc().splitlines():
job.log.error(line, extra=dict(traceback_report=True))
self.log.info('Completed jobs : {}'.format(
[c.name for c in self.completed]))
self.log.info('Remaining jobs : {}'.format(
[c.name for c in self.remaining]))
self.log2recipe(job, recipe, step, 'failed')
for step, jb in jobs[i+1:]:
self.log.info(
'Logging remaining task: {}'.format(jb.label))
self.log2recipe(jb, recipe, step, 'remaining')
self.log.info(
'Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
# raise pipeline exception. Original exception context is discarded by "from None" (since we've already
# logged it above, we don't need to include it with the new exception)
raise PipelineException(exc, self.completed, job, self.remaining) from None
self.log.info(
'Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
self.log.info('Recipe executed successfully')
return 0
def close(self):
"""Call this to explicitly close the recipe and clean up. Don't call run() after close()!"""
for job in self.jobs:
job.close()
if os.path.exists(self.workdir):
shutil.rmtree(self.workdir)
if self._log_fh is not None:
self._log_fh.close()
def __del__(self):
"""Failsafe"""
self.close()
| 32,121 | 39.919745 | 211 | py |
Stimela | Stimela-master/stimela/tests/acceptance_tests/stimela-test-meerkat.py | # -*- coding: future_fstrings -*-
import stimela
from stimela.pathformatter import pathformatter as spf
import os
import unittest
import subprocess
from nose.tools import timed
import shutil
class mk_reduce(unittest.TestCase):
@classmethod
def setUpClass(cls):
unittest.TestCase.setUpClass()
# I/O
global INPUT
INPUT = 'input'
global MSDIR
MSDIR = 'msdir'
global MS
MS = '1491291289.1GC.ms'
global PREFIX
PREFIX = 'deep2'
global LABEL
LABEL = "test_mkreduction"
global OUTPUT
OUTPUT = "output_%s" % LABEL
stimela.register_globals()
@classmethod
def tearDownClass(cls):
unittest.TestCase.tearDownClass()
#global OUTPUT
# shutil.rmtree(OUTPUT)
def tearDown(self):
unittest.TestCase.tearDown(self)
def setUp(self):
unittest.TestCase.setUp(self)
def testEndToEndReduction(self):
global INPUT, OUTPUT, MSDIR, MS, LABEL
recipe = stimela.Recipe('Test reduction script',
ms_dir=MSDIR, JOB_TYPE="docker", log_dir="logs")
imname1 = "deep2.1gc"
imname2 = "deep2.2gc"
recipe.add("cab/ddfacet", "ddfacet_test1",
{
"Data-MS": [MS],
"Output-Name": imname1,
"Image-NPix": 2048,
"Image-Cell": 2,
"Cache-Reset": True,
"Freq-NBand": 2,
"Freq-NDegridBand": 4,
"Weight-ColName": "WEIGHT",
"Data-ChunkHours": 0.1,
"Data-Sort": True,
"Log-Boring": True,
"Deconv-MaxMajorIter": 2,
"Deconv-MaxMinorIter": 1500,
"Predict-ColName": "MODEL_DATA"
},
input=INPUT, output=OUTPUT, shared_memory="8gb",
label="image1",
time_out=1800)
recipe.add('cab/tricolour', "flag_data",
{
"ms": MS,
"data-column": "DATA",
"window-backend": 'numpy',
"flagging-strategy": "total_power",
"subtract-model-column": "MODEL_DATA",
},
input=INPUT, output=OUTPUT, label="flag_data",
time_out=1800)
maskname0 = "MASK.fits"
recipe.add('cab/cleanmask', 'mask0', {
"image": '%s.app.restored.fits:output' % (imname1),
"output": '%s:output' % (maskname0),
"dilate": False,
"sigma": 25,
},
input=INPUT,
output=OUTPUT,
label='mask0:: Make mask',
time_out=1800)
recipe.add("cab/ddfacet", "ddfacet_test2",
{
"Data-MS": [MS],
"Output-Name": imname1,
"Output-Images": "DdPAMRrIikze",
"Image-NPix": 4096,
"Image-Cell": 2.0,
"Cache-Reset": True,
"Freq-NBand": 2,
"Freq-NDegridBand": 4,
"Mask-External": '%s:output' % (maskname0),
"Weight-ColName": "WEIGHT",
"Data-ChunkHours": 0.1,
"Data-Sort": True,
"Log-Boring": True,
"Deconv-MaxMajorIter": 2,
"Deconv-MaxMinorIter": 1500,
},
input=INPUT, output=OUTPUT, shared_memory="24gb",
label="image2",
time_out=1800)
recipe.add("cab/shadems", "shadems_test",
{
'ms': MS,
'xaxis': 'DATA:imag',
'yaxis': 'real',
'col': 'DATA',
'png': '%s_shadems_test_real_imag' % (PREFIX)
},
input=INPUT, output=OUTPUT,
label="shadems_test",
time_out=1800)
# # First selfcal round
recipe.add("cab/catdagger", "auto_tagger_{}_{}".format("decaltest", "1"), {
'ds9-reg-file': "{}.{}.dE.reg".format("decaltest", "1"),
'ds9-tag-reg-file': "{}.{}.dE.clusterleads.reg".format("decaltest", "1"),
'sigma': 10,
'min-distance-from-tracking-centre': 350,
'noise-map': "{}.app.residual.fits:output".format(imname1),
}, input=INPUT, output=OUTPUT, label="auto_tagger_{}_{}".format("decaltest", "1"), shared_memory="250g")
recipe.add("cab/cubical_ddf", "cubical_cal",
{
'data-ms': MS,
'data-column': "DATA",
'dist-nworker': 4,
'dist-nthread': 1,
'dist-max-chunks': 20,
'data-freq-chunk': 0,
'data-time-chunk': 1,
'model-list': spf("MODEL_DATA+-{{}}{}@{{}}{}:{{}}{}@{{}}{}".format(
imname1+".DicoModel", "{}.{}.dE.reg".format("decaltest", "1"),
imname1+".DicoModel", "{}.{}.dE.reg".format("decaltest", "1")),
"output", "output", "output", "output"),
'log-verbose': "solver=0",
'weight-column': "WEIGHT",
'flags-apply': "FLAG",
'flags-auto-init': "legacy",
'madmax-enable': False,
'madmax-threshold': [0, 0, 10],
'madmax-global-threshold': [0, 0],
'sol-jones': 'g,dd',
'sol-stall-quorum': 0.95,
'out-name': "cubicaltest",
'out-column': "CORRECTED_DATA",
'log-verbose': "solver=0",
'g-type': "complex-2x2",
'g-freq-int': 0,
'dd-freq-int': 0,
'g-time-int': 20,
'dd-time-int': 20,
'g-max-iter': 10,
'sol-term-iters': 10,
'g-update-type': "phase-diag",
'dd-update-type': "complex-2x2",
'out-subtract-dirs': '1:',
'dd-fix-dirs': "0",
'dd-max-iter': 200,
'dd-clip-high': 0,
'dd-clip-low': 0,
'dd-max-prior-error': 0.35,
'dd-max-post-error': 0.35,
'degridding-NDegridBand': 3,
'degridding-MaxFacetSize': 0.15,
'out-mode': "sr",
}, input=INPUT, output=OUTPUT,
label="cubical",
shared_memory="24gb",
time_out=3600)
recipe.add("cab/cubical", "cubical_cal2",
{
'data-ms': MS,
'data-column': "DATA",
'dist-nworker': 4,
'dist-nthread': 1,
'dist-max-chunks': 20,
'data-freq-chunk': 0,
'data-time-chunk': 1,
'model-list': spf("MODEL_DATA"),
'weight-column': "WEIGHT",
'flags-apply': "FLAG",
'flags-auto-init': "legacy",
'madmax-enable': False,
'madmax-threshold': [0, 0, 10],
'madmax-global-threshold': [0, 0],
'sol-jones': 'g',
'sol-stall-quorum': 0.95,
'out-name': "cubicaltest",
'out-column': "CORRECTED_DATA",
'log-verbose': "solver=2",
'g-type': "complex-2x2",
'g-freq-int': 0,
'g-time-int': 20,
'g-max-iter': 10,
'sol-term-iters': 10,
'out-overwrite' : True,
'g-update-type': "complex-2x2",
}, input=INPUT, output=OUTPUT,
label="cubical",
shared_memory="24gb",
time_out=1800)
recipe.add("cab/ragavi_vis", "ragavi_vis_test",
{
'ms': MS,
'xaxis': 'imaginary',
'yaxis': 'real',
'data-column': 'CORRECTED_DATA',
'htmlname': "%s_ragavi_vis_real_imag" % (PREFIX)
},
input=INPUT, output=OUTPUT,
label="ragavi_vis_test",
time_out=1800)
recipe.run()
| 9,147 | 37.762712 | 112 | py |
Stimela | Stimela-master/stimela/tests/acceptance_tests/stimela-test-kat7.py | # -*- coding: future_fstrings -*-
import stimela
import os
import unittest
import subprocess
from nose.tools import timed
import shutil
class kat7_reduce(unittest.TestCase):
@classmethod
def setUpClass(cls):
unittest.TestCase.setUpClass()
# I/O
global INPUT
INPUT = 'input'
global MSDIR
MSDIR = 'msdir'
global MS
MS = 'kat-7-small.ms'
global PREFIX
PREFIX = 'kat7_small_LBand'
# Fields
global GCAL
GCAL = 'PKS2326-477'
global TARGET
TARGET = '1'
global BPCAL
BPCAL = 'PKS1934-638'
# Reference antenna
global REFANT
REFANT = '0'
# Calibration tables
global ANTPOS_TABLE
ANTPOS_TABLE = PREFIX + '.antpos:output'
global BPCAL_TABLE
BPCAL_TABLE = PREFIX + '.B0:output'
global DELAYCAL_TABLE
DELAYCAL_TABLE = PREFIX + '.K0:output'
global GAINCAL_TABLE
GAINCAL_TABLE = PREFIX + '.G0:output'
global FLUXSCALE_TABLE
FLUXSCALE_TABLE = PREFIX + '.fluxscale:output'
global LABEL
LABEL = "test_reduction"
global OUTPUT
OUTPUT = "output_%s" % LABEL
global MSCONTSUB
MSCONTSUB = MS + '.contsub'
global SPW
SPW = '0:100~355'
# Calibration tables
global LSM0
LSM0 = PREFIX + '.lsm.html'
global SELFCAL_TABLE1
SELFCAL_TABLE1 = PREFIX + '.SF1:output'
global IMAGE1
IMAGE1 = PREFIX + 'image1:output'
global MASK1
MASK1 = PREFIX + 'mask1.fits'
global IMAGE2
IMAGE2 = PREFIX + 'image2:output'
global nchans
nchans = 256
global chans
chans = [100, 355]
# Clean-Mask-Clean
global imname0
imname0 = PREFIX + 'image0'
global maskname0
maskname0 = PREFIX + 'mask0.fits'
global maskname01
maskname01 = PREFIX + 'mask01.fits'
global imname1
imname1 = PREFIX + 'image1'
global corr_ms
corr_ms = MS + '-corr.ms'
global lsm0
lsm0 = PREFIX + '-LSM0'
stimela.register_globals()
@classmethod
def tearDownClass(cls):
unittest.TestCase.tearDownClass()
def tearDown(self):
unittest.TestCase.tearDown(self)
def setUp(self):
unittest.TestCase.setUp(self)
def testEndToEndReduction(self):
global INPUT, OUTPUT, MSDIR, MS, LABEL
global GAINCAL_TABLE2, FLUXSCALE_TABLE, GAINCAL_TABLE, DELAYCAL_TABLE, BPCAL_TABLE, ANTPOS_TABLE
global REFANT, BPCAL, TARGET, GCAL, PREFIX
global MSCONTSUB, SPW, LSM0, SELFCAL_TABLE1, corr_ms, lsm0
global IMAGE1, IMAGE2, MASK1, nchans, chans, imname0, maskname0, maskname01, imname1
recipe = stimela.Recipe('Test reduction script',
ms_dir=MSDIR, JOB_TYPE="docker", log_dir="logs")
recipe.add('cab/casa_listobs', 'listobs', {
"vis": MS
},
input=INPUT,
output=OUTPUT,
label='listobs:: some stats',
time_out=300)
recipe.add("cab/owlcat_plotelev", "plotobs", {
"msname" : MS,
"output-name" : "obsplot.png",
},
input=INPUT,
output=OUTPUT,
label="plotobs:: Plot elevation/azimuth vs LST/UTC")
# It is common for the array to require a small amount of time to settle down at the start of a scan. Consequently, it has
# become standard practice to flag the initial samples from the start
# of each scan. This is known as 'quack' flagging
recipe.add('cab/casa_flagdata', 'quack_flagging', {
"vis": MS,
"mode": 'quack',
"quackinterval": 10.0,
"quackmode": 'beg',
},
input=INPUT,
output=OUTPUT,
label='quack_flagging:: Quack flagging',
time_out=300)
# Flag the autocorrelations
recipe.add("cab/politsiyakat_autocorr_amp", "flag_autopower", {
"msname": MS,
"field": "0,1,2",
"cal_field": "0,2",
"nrows_chunk": 5000,
"scan_to_scan_threshold": 1.5,
"antenna_to_group_threshold": 4,
"nio_threads": 1,
"nproc_threads": 32,
},input=INPUT, output=OUTPUT, label="flag_autopower")
recipe.add('cab/casa_flagdata', 'autocorr_flagging', {
"vis": MS,
"mode": 'manual',
"autocorr": True,
},
input=INPUT,
output=OUTPUT,
label='autocorr_flagging:: Autocorrelations flagging',
time_out=300)
# Flag bad channels
recipe.add('cab/casa_flagdata', 'badchan_flagging', {
"vis": MS,
"mode": 'manual',
"spw": "0:113~113,0:313~313,0:369~369,0:601~607,0:204~204,0:212~212,0:594~600",
},
input=INPUT,
output=OUTPUT,
label='badchan_flagging:: Bad Channel flagging',
time_out=300)
recipe.add('cab/casa_clearcal', 'clearcal',
{
"vis": MS,
"addmodel": True
},
input=INPUT,
output=OUTPUT,
label='clearcal:: casa clearcal',
time_out=300)
recipe.add('cab/casa_setjy', 'set_flux_scaling', {
"vis": MS,
"field": BPCAL,
"standard": 'Perley-Butler 2010',
"usescratch": True,
"scalebychan": True,
},
input=INPUT,
output=OUTPUT,
label='set_flux_scaling:: Set flux density value for the amplitude calibrator',
time_out=300)
recipe.add('cab/casa_bandpass', 'bandpass_cal', {
"vis": MS,
"caltable": BPCAL_TABLE,
"field": BPCAL,
"refant": REFANT,
"spw": SPW,
"solint": 'inf',
"bandtype": 'B',
# "opacity" : 0.0,
# "gaincurve" : False,
},
input=INPUT,
output=OUTPUT,
label='bandpass_cal:: Bandpass calibration',
time_out=300)
recipe.add('cab/ragavi', 'ragavi_gains_plot_bandpass', {
'table': BPCAL_TABLE,
'gaintype': "B",
'htmlname': PREFIX + '_B0_amp_chan'
},
input=INPUT,
output=OUTPUT,
label='ragavi_gains_plot_bandpass:: Plot bandpass table',
time_out=1200
)
# display the bandpass solutions. Note that in the plotcal inputs below, the amplitudes are being displayed as a function of
# frequency channel. The parameter subplot=221 is used to display multiple plots per page (2 plots per page in the y
# direction and 2 in the x direction). The first two commands below show the amplitude solutions (one per each polarization)
# and the last two show the phase solutions (one per each polarization). Parameter iteration='antenna' is used to step
# through separate plots for each antenna.
recipe.add('cab/casa_plotcal', 'plot_bandpass_amp_R', {
"caltable": BPCAL_TABLE,
"poln": 'R',
"xaxis": 'chan',
"yaxis": 'amp',
"field": BPCAL,
"spw": SPW,
"subplot": 221,
"figfile": PREFIX + '-B0-R-amp.png',
},
input=INPUT,
output=OUTPUT,
label='plot_bandpass_amp_R:: Plot bandpass table. AMP, R',
time_out=1200)
# Gain calibration - amplitude and phase - first for BPCAL.
recipe.add('cab/casa_gaincal', 'gaincal_bp', {
"vis": MS,
"caltable": GAINCAL_TABLE,
"field": "{0:s},{1:s}".format(BPCAL, GCAL),
"solint": 'inf',
"refant": '',
"gaintype": 'G',
"calmode": 'ap',
"spw": SPW,
"solnorm": False,
"gaintable": [BPCAL_TABLE],
"interp": ['nearest'],
},
input=INPUT,
output=OUTPUT,
label="gaincal:: Gain calibration",
time_out=300,
version=None)
# Set fluxscale
recipe.add('cab/casa_fluxscale', 'fluxscale', {
"vis": MS,
"caltable": GAINCAL_TABLE,
"fluxtable": FLUXSCALE_TABLE,
"reference": [BPCAL],
"transfer": [GCAL],
"save_result" : "fluxinfo.pickle",
"incremental": False,
},
input=INPUT,
output=OUTPUT,
label='fluxscale:: Set fluxscale',
time_out=300)
# Apply calibration to BPCAL
recipe.add('cab/casa_applycal', 'applycal_bp', {
"vis": MS,
"field": BPCAL,
"gaintable": [BPCAL_TABLE, FLUXSCALE_TABLE],
"gainfield": ['', '', BPCAL],
"interp": ['', '', 'nearest'],
"calwt": [False],
"parang": False,
"applymode": "calflag",
},
input=INPUT,
output=OUTPUT,
label='applycal_bp:: Apply calibration to Bandpass Calibrator',
time_out=1800)
# Flag the phase
recipe.add("cab/politsiyakat_cal_phase", "flag_calphase", {
"msname": MS,
"field": ",".join(["0","1","2"]),
"cal_field": ",".join(["0","2"]),
"nrows_chunk": 5000,
"data_column": "CORRECTED_DATA",
"scan_to_scan_threshold": 1.5,
"baseline_to_group_threshold": 4,
"nio_threads": 1,
"nproc_threads": 32,
},input=INPUT, output=OUTPUT, label="flag_calphase")
recipe.run()
recipe = stimela.Recipe('KAT reduction script 2',
ms_dir=MSDIR, JOB_TYPE="docker", log_dir="logs")
# Copy CORRECTED_DATA to DATA, so we can start uv_contsub
recipe.add("cab/msutils", "move_corrdata_to_data", {
"command": "copycol",
"msname": MS,
"fromcol": "CORRECTED_DATA",
"tocol": "DATA",
},
input=INPUT, output=OUTPUT,
label="move_corrdata_to_data::msutils",
time_out=1800)
os.system("rm -rf {}/{}-corr.ms".format(MSDIR, MS[:-3]))
recipe.add('cab/casa_split', 'split_corr_data',
{
"vis": MS,
"outputvis": MS[:-3] + '-corr.ms',
"field": str(BPCAL),
"spw": SPW,
"datacolumn": 'data',
},
input=INPUT,
output=OUTPUT,
label='split_corr_data:: Split corrected data from MS',
time_out=1800)
MS = MS[:-3] + '-corr.ms'
recipe.add('cab/casa_clearcal', 'prep_split_data',
{
"vis": MS,
"addmodel": True
},
input=INPUT,
output=OUTPUT,
label='prep_split_data:: Prep split data with casa clearcal',
time_out=1800)
# Clean-Mask-Clean
imname0 = PREFIX + 'image0'
maskname0 = PREFIX + 'mask0.fits'
maskname01 = PREFIX + 'mask01.fits'
imname1 = PREFIX + 'image1'
recipe.add('cab/casa_tclean', 'image_target_field_r1', {
"vis": MS,
"datacolumn": "corrected",
"field": "0",
"start": 21, # Other channels don't have any data
"nchan": 235 - 21,
"width": 1,
# Use Briggs weighting to weigh visibilities for imaging
"weighting": "briggs",
"robust": 0,
"imsize": 256, # Image size in pixels
"cellsize": "30arcsec", # Size of each square pixel
"niter": 100,
"stokes": "I",
"prefix": '%s:output' % (imname1),
},
input=INPUT,
output=OUTPUT,
label="image_target_field_r1:: Image target field second round",
time_out=300)
recipe.add('cab/cleanmask', 'mask0', {
"image": '%s.image.fits:output' % (imname1),
"output": '%s:output' % (maskname0),
"dilate": False,
"sigma": 20,
},
input=INPUT,
output=OUTPUT,
label='mask0:: Make mask',
time_out=1800)
lsm0 = PREFIX + '-LSM0'
# Source finding for initial model
recipe.add("cab/pybdsm", "extract_init_model", {
"image": '%s.image.fits:output' % (imname1),
"outfile": '%s:output' % (lsm0),
"thresh_pix": 25,
"thresh_isl": 15,
"port2tigger": True,
},
input=INPUT, output=OUTPUT,
label="extract_init_model:: Make initial model from preselfcal image",
time_out=1800)
# First selfcal round
recipe.add("cab/calibrator", "calibrator_Gjones_subtract_lsm0", {
"skymodel": "%s.lsm.html:output" % (lsm0),
"msname": MS,
"threads": 16,
"column": "DATA",
"output-data": "CORR_RES",
"Gjones": True,
# Ad-hoc right now, subject to change
"Gjones-solution-intervals": [20, 0],
"Gjones-matrix-type": "GainDiagPhase",
"tile-size": 512,
"field-id": 0,
},
input=INPUT, output=OUTPUT,
label="calibrator_Gjones_subtract_lsm0:: Calibrate and subtract LSM0",
time_out=1800)
# Diversity is a good thing... lets add some DDFacet to this soup bowl
imname = PREFIX + 'ddfacet'
recipe.add("cab/ddfacet", "ddfacet_test",
{
"Data-MS": [MS],
"Output-Name": imname,
"Image-NPix": 256,
"Image-Cell": 30,
"Cache-Reset": True,
"Freq-NBand": 2,
"Weight-ColName": "WEIGHT",
"Data-ChunkHours": 10,
"Beam-FITSFeed": "rl",
"Data-Sort": True,
"Log-Boring": True,
"Deconv-MaxMajorIter": 1,
"Deconv-MaxMinorIter": 20,
},
input=INPUT, output=OUTPUT, shared_memory="8gb",
label="image_target_field_r0ddfacet:: Make a test image using ddfacet",
time_out=520)
lsm1 = PREFIX + '-LSM0'
# Source finding for initial model
recipe.add("cab/pybdsm", "extract_init_model", {
"image": '%s.app.restored.fits:output' % (imname),
"outfile": '%s:output' % (lsm1),
"thresh_pix": 25,
"thresh_isl": 15,
"port2tigger": True,
},
input=INPUT, output=OUTPUT,
label="extract_init_model:: Make initial model from preselfcal image",
time_out=1800)
# Stitch LSMs together
lsm2 = PREFIX + '-LSM2'
recipe.add("cab/tigger_convert", "stitch_lsms1", {
"input-skymodel": "%s.lsm.html:output" % lsm0,
"output-skymodel": "%s.lsm.html:output" % lsm2,
"rename": True,
"force": True,
"append": "%s.lsm.html:output" % lsm1,
},
input=INPUT, output=OUTPUT,
label="stitch_lsms1::Create master lsm file",
time_out=300)
recipe.add("cab/calibrator", "calibrator_Gjones_subtract_lsm0", {
"skymodel": "%s.lsm.html:output" % (lsm2),
"msname": MS,
"threads": 16,
"column": "DATA",
"output-data": "CORR_RES",
"Gjones": True,
# Ad-hoc right now, subject to change
"Gjones-solution-intervals": [20, 0],
"Gjones-matrix-type": "GainDiagPhase",
"tile-size": 512,
"field-id": 0,
},
input=INPUT, output=OUTPUT,
label="calibrator_Gjones_subtract_lsm0:: Calibrate and subtract LSM0",
time_out=1800)
recipe.add('cab/casa_uvcontsub', 'uvcontsub',
{
"msname": MS,
"field": "0",
"fitorder": 1,
},
input=INPUT,
output=OUTPUT,
label='uvcontsub:: Subtract continuum in the UV plane',
time_out=1800)
# Image HI
recipe.add('cab/casa_clean', 'casa_dirty_cube',
{
"msname": MS + ".contsub",
"prefix": PREFIX,
"mode": 'channel',
"nchan": nchans,
"niter": 0,
"npix": 256,
"cellsize": 30,
"weight": 'natural',
"port2fits": True,
},
input=INPUT,
output=OUTPUT,
label='casa_dirty_cube:: Make a dirty cube with CASA CLEAN',
time_out=1800)
recipe.add('cab/sofia', 'sofia',
{
# USE THIS FOR THE WSCLEAN DIRTY CUBE
# "import.inFile" : '{:s}-cube.dirty.fits:output'.format(combprefix),
# USE THIS FOR THE CASA CLEAN CUBE
# CASA CLEAN cube
"import.inFile": '{:s}.image.fits:output'.format(PREFIX),
"steps.doFlag": False,
"steps.doScaleNoise": True,
"steps.doSCfind": True,
"steps.doMerge": True,
"steps.doReliability": False,
"steps.doParameterise": False,
"steps.doWriteMask": True,
"steps.doMom0": False,
"steps.doMom1": False,
"steps.doWriteCat": True,
"flag.regions": [],
"scaleNoise.statistic": 'mad',
"SCfind.threshold": 4,
"SCfind.rmsMode": 'mad',
"merge.radiusX": 2,
"merge.radiusY": 2,
"merge.radiusZ": 2,
"merge.minSizeX": 2,
"merge.minSizeY": 2,
"port2tigger": False,
"merge.minSizeZ": 2,
},
input=INPUT,
output=OUTPUT,
label='sofia:: Make SoFiA mask and images',
time_out=1800)
recipe.run()
| 19,391 | 34.386861 | 132 | py |
Stimela | Stimela-master/stimela/tests/unit_tests/test-containertech.py | # -*- coding: future_fstrings -*-
import stimela
import os
import sys
import unittest
import subprocess
from nose.tools import timed
import shutil
import glob
from stimela.exceptions import *
from stimela.dismissable import dismissable as sdm
from stimela.pathformatter import pathformatter as spf
from stimela import cargo, singularity
class basicrecipe_test(unittest.TestCase):
@classmethod
def setUpClass(cls):
unittest.TestCase.setUpClass()
global INPUT, MSDIR, OUTPUT, MS, PREFIX, LSM, MS_SIM
INPUT = os.path.join(os.path.dirname(__file__), "input")
MSDIR = "msdir"
global OUTPUT
OUTPUT = "/tmp/output"
# Start stimela Recipe instance
import stimela.main as main
os.chdir(os.path.dirname(__file__))
cab = cargo.cab.CabDefinition(parameter_file="cab/custom/parameters.json")
global SINGULARITY, PODMAN
SINGULARITY = False
PODMAN = False
if singularity.version:
if singularity.BINARY_NAME == "singularity":
SINGULARITY = singularity.version >= "2.6.0"
else:
SINGULARITY = True
@classmethod
def tearDownClass(cls):
pass
def tearDown(self):
unittest.TestCase.tearDown(self)
global MSDIR
global OUTPUT
if os.path.exists(MSDIR):
shutil.rmtree(MSDIR)
if os.path.exists(OUTPUT):
shutil.rmtree(OUTPUT)
if os.path.exists(INPUT):
shutil.rmtree(INPUT)
if os.path.exists("stimela_parameter_files"):
shutil.rmtree("stimela_parameter_files")
for log in glob.glob("log-*"):
os.remove(log)
def setUp(self):
unittest.TestCase.setUp(self)
if not os.path.isdir(INPUT):
os.mkdir(INPUT)
def test_singularity(self):
global MSDIR
global INPUT
global OUTPUT
global SINGULARITY
if SINGULARITY is False:
return
stimela.register_globals()
rrr = stimela.Recipe("singularitypaths",
ms_dir=MSDIR,
JOB_TYPE="singularity",
cabpath="cab/",
singularity_image_dir=os.environ["STIMELA_PULLFOLDER"],
log_dir="logs")
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla5": ["testinput2.txt:input",
"testinput3.txt:msfile",
spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
}, input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"],
"testinput2.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
"testinput3.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
"{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(
rrr.jobs[0].job.IODEST["input"],
rrr.jobs[0].job.IODEST["msfile"],
rrr.jobs[0].job.IODEST["output"]
)
def test_podman(self):
global MSDIR
global INPUT
global OUTPUT
global PODMAN
if PODMAN is False:
return
stimela.register_globals()
rrr = stimela.Recipe("podmanpaths",
ms_dir=MSDIR,
JOB_TYPE="podman",
cabpath="cab/",
log_dir="logs")
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla5": ["testinput2.txt:input",
"testinput3.txt:msfile",
spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
}, input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"],
"testinput2.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
"testinput3.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
"{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(
rrr.jobs[0].job.IODEST["input"],
rrr.jobs[0].job.IODEST["msfile"],
rrr.jobs[0].job.IODEST["output"]
)
if __name__ == "__main__":
unittest.main()
| 4,777 | 35.473282 | 108 | py |
Stimela | Stimela-master/stimela/tests/unit_tests/test-addcab.py | # -*- coding: future_fstrings -*-
import stimela
import os
import sys
import unittest
import subprocess
from nose.tools import timed
import shutil
import glob
from stimela.exceptions import *
from stimela.dismissable import dismissable as sdm
from stimela.pathformatter import pathformatter as spf
import stimela.cargo as cargo
class basicrecipe_test(unittest.TestCase):
@classmethod
def setUpClass(cls):
unittest.TestCase.setUpClass()
DIR = os.path.dirname(__file__)
global INPUT, MSDIR, OUTPUT, MS, PREFIX, LSM, MS_SIM
INPUT = os.path.join(DIR, "input")
MSDIR = "msdir"
global OUTPUT
OUTPUT = "/tmp/output"
global CABPATH
CABPATH = os.path.join(DIR, "cab")
@classmethod
def tearDownClass(cls):
pass
def tearDown(self):
unittest.TestCase.tearDown(self)
global MSDIR
global OUTPUT
if os.path.exists(MSDIR):
shutil.rmtree(MSDIR)
if os.path.exists(OUTPUT):
shutil.rmtree(OUTPUT)
if os.path.exists(INPUT):
shutil.rmtree(INPUT)
if os.path.exists("stimela_parameter_files"):
shutil.rmtree("stimela_parameter_files")
for log in glob.glob("log-*"):
os.remove(log)
def setUp(self):
unittest.TestCase.setUp(self)
if not os.path.isdir(INPUT):
os.mkdir(INPUT)
def test_define_cab(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("customcab", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a"
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
assert len(rrr.jobs) == 1
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[0].value == "a"
assert len(rrr.completed) == 1
assert len(rrr.remaining) == 0
def test_invalid_choice(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("invchoice", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "d" # only accepts a, b or c
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
with self.assertRaises(PipelineException):
rrr.run() #validate and run
def test_dismissable(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("testdismissable", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla4": sdm("abc"),
"bla3": sdm(None)
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[0].value == "a"
assert rrr.jobs[0].job._cab.parameters[1].value is None
assert rrr.jobs[0].job._cab.parameters[2].value is None
assert rrr.jobs[0].job._cab.parameters[3].value == ["abc"]
def test_floattypefail(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("testfloattypefail", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla3": "1.0a",
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
with self.assertRaises(PipelineException):
rrr.run() #validate and run
def test_floattypesuccess(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("testfloattypesuccess", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla3": 4.0,
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[2].value == [4.0]
def test_required(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("testrequired", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla3": 4.0,
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
with self.assertRaises(PipelineException):
rrr.run() #validate and run
def test_iooverride(self):
global MSDIR
global INPUT
global CABPATH
with open(os.path.join(INPUT, "testinput.txt"), "w+") as f:
pass
global OUTPUT
stimela.register_globals()
rrr = stimela.Recipe("testiooverrides", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla2": "testinput.txt:input",
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[0].value == "a"
assert rrr.jobs[0].job._cab.parameters[1].value == os.path.join(rrr.jobs[0].job.IODEST["input"],
"testinput.txt")
def test_iopathval(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("ioval", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla2": "testinput2.txt:input",
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
with self.assertRaises(PipelineException): # not exist during validation
rrr.run() #validate and run
def test_iopathlist(self):
global MSDIR
global INPUT
global OUTPUT
global CABPATH
stimela.register_globals()
rrr = stimela.Recipe("pathlist", ms_dir=MSDIR)
rrr.add("cab/custom", "test1", {
"bla1": "a", # only accepts a, b or c
"bla5": ["testinput2.txt:input",
"testinput3.txt:msfile",
spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
},
cabpath=CABPATH,
input=INPUT, output=OUTPUT)
rrr.run() #validate and run
assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"],
"testinput2.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
"testinput3.txt")
assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
"{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(rrr.jobs[0].job.IODEST["input"],
rrr.jobs[0].job.IODEST["msfile"],
rrr.jobs[0].job.IODEST["output"]
)
if __name__ == "__main__":
unittest.main()
| 7,141 | 31.912442 | 108 | py |
Stimela | Stimela-master/stimela/tests/unit_tests/cab/custom/src/run.py | import os
import sys
import yaml
import glob
import shutil
import shlex
import subprocess
# Environment contract: the stimela runner passes the parameter-file path and
# the in-container volume paths through environment variables.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]

# load the validated cab definition (binary, prefix, parameters, junk list)
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]

# build the command line: skip unset/False parameters; a True value becomes a
# bare flag (empty value string). (Removed an unused "url" variable here.)
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    elif value is True:
        value = ''
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]

_runc = " ".join([cab["binary"]] + args)

try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # always clean up declared junk files/directories, even if the task failed
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,041 | 22.155556 | 91 | py |
Stimela | Stimela-master/stimela/utils/xrun_livelog.py | import sys, os, codecs, time, hashlib, subprocess
from threading import Event, Thread
from . import StimelaCabRuntimeError
# set True for verbose timing chatter from the watcher threads
DEBUG = False
INTERRUPT_TIME = 2.0 # seconds -- do not want to constantly interrupt the child process
# seconds between reads of the live log file
LIVELOG_TIME = 0.1
def xrun(command, options, log=None, _log_container_as_started=False, logfile=None, timeout=-1, kill_callback=None):
    """
    Run something on command line, echoing *logfile* live while it runs.

    Example: _run("ls", ["-lrt", "../"])

    Args:
        command: executable to invoke
        options: list of command-line arguments (each rendered with str())
        log: optional logger; when given, output goes through it, else print
        _log_container_as_started: unused here; kept for interface compatibility
        logfile: log file to tail while the process runs (falls back to the
            LOGFILE environment variable when set)
        timeout: kill the process after this many seconds; negative disables
        kill_callback: callable used to terminate the process on timeout
            (defaults to Popen.kill)

    Raises:
        StimelaCabRuntimeError: if the process exits with a non-zero code.
    """
    if "LOGFILE" in os.environ and logfile is None:
        logfile = os.environ["LOGFILE"] # superceed if not set
    # skip lines from previous log files
    if logfile is not None and os.path.exists(logfile):
        with codecs.open(logfile, "r", encoding="UTF-8",
                         errors="ignore", buffering=0) as foutlog:
            # consume existing content so tell() reports the end of the previous
            # run's output; the live reader below starts echoing from there
            foutlog.readlines()
            prior_log_bytes_read = foutlog.tell()
    else: # not existant, create
        prior_log_bytes_read = 0
    if logfile is not None and not os.path.exists(logfile):
        with codecs.open(logfile, "w+", encoding="UTF-8",
                         errors="ignore", buffering=0) as foutlog:
            pass

    cmd = " ".join([command] + list(map(str, options)))

    def _remove_ctrls(msg):
        # strip ANSI terminal escape sequences before logging
        import re
        ansi_escape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
        return ansi_escape.sub('', msg)

    def _print_info(msg, loglevel="INFO"):
        # route a message through the logger at *loglevel*, or print it
        if msg is None:
            return
        msg = _remove_ctrls(msg)
        if msg.strip() == "": return
        if log:
            try:
                getattr(log, loglevel.lower())(msg.rstrip('\n'))
            except UnicodeError:
                log.warn("Log contains unicode and will not be printed")
        else:
            try:
                print(msg),
            except UnicodeError:
                print("Log contains unicode and will not be printed")

    def _print_warn(msg):
        # like _print_info, but always logs at INFO level (used by the reaper)
        if msg is None:
            return
        msg = _remove_ctrls(msg)
        if msg.strip() == "": return
        if log:
            try:
                log.info(msg.rstrip('\n'))
            except UnicodeError:
                log.warn("Log contains unicode and will not be printed")
        else:
            try:
                print(msg),
            except UnicodeError:
                print("Log contains unicode and will not be printed")

    _print_info(u"Running: {0:s}".format(cmd), loglevel="INFO")
    sys.stdout.flush()
    starttime = time.time()
    process = p = None
    stop_log_printer = Event()
    try:
        p = process = subprocess.Popen(cmd,
                                       shell=True)
        kill_callback = kill_callback or p.kill

        def clock_killer(p):
            # watchdog thread: kills the child once *timeout* is exceeded
            while process.poll() is None and (timeout >= 0):
                currenttime = time.time()
                if (currenttime - starttime < timeout):
                    DEBUG and _print_warn(u"Clock Reaper: has been running for {0:f}, must finish in {1:f}".format(
                        currenttime - starttime, timeout))
                else:
                    _print_warn(
                        u"Clock Reaper: Timeout reached for '{0:s}'... sending the KILL signal".format(cmd))
                    kill_callback()
                time.sleep(INTERRUPT_TIME)

        def log_reader(logfile, stop_event):
            # tail thread: echoes new logfile content while the child runs
            bytes_read = prior_log_bytes_read # skip any previous runs' output
            # note: uses is_set() (isSet() is a deprecated alias)
            while not stop_event.is_set():
                if logfile is not None and os.path.exists(logfile):
                    with codecs.open(logfile, "r", encoding="UTF-8",
                                     errors="ignore", buffering=0) as foutlog:
                        foutlog.seek(bytes_read, 0)
                        lines = foutlog.readlines()
                        bytes_read = foutlog.tell()
                        for line in lines:
                            line and _print_info(line)
                time.sleep(LIVELOG_TIME) # wait for the log to go to disk

        Thread(target=clock_killer, args=tuple([p])).start()
        if log is not None:
            # crucial - child process should not write to stdout unless it is
            # the container process itself
            Thread(target=log_reader, args=tuple([logfile, stop_log_printer])).start()
        while (process.poll() is None):
            currenttime = time.time()
            DEBUG and _print_info(
                u"God mode on: has been running for {0:f}".format(currenttime - starttime))
            # this is probably not ideal as it interrupts the process every few seconds,
            time.sleep(INTERRUPT_TIME)
            # check whether there is an alternative with a callback
        assert hasattr(
            process, "returncode"), "No returncode after termination!"
    finally:
        # always stop the tail thread, even if we are raising
        stop_log_printer.set()

    if (process is not None) and process.returncode:
        raise StimelaCabRuntimeError(
            '%s: returns errr code %d' % (command, process.returncode))
| 5,204 | 36.446043 | 116 | py |
Stimela | Stimela-master/stimela/utils/logger.py | import os
import sys
import json
import yaml
import time
import subprocess
from io import StringIO
import codecs
from datetime import datetime
import logging
class MultiplexingHandler(logging.Handler):
    """handler to send INFO and below to stdout, everything above to stderr"""

    def __init__(self, info_stream=sys.stdout, err_stream=sys.stderr):
        super(MultiplexingHandler, self).__init__()
        self.info_handler = logging.StreamHandler(info_stream)
        self.err_handler = logging.StreamHandler(err_stream)
        self.multiplex = True

    def emit(self, record):
        # WARNING and above go to the error stream -- unless multiplexing is off,
        # in which case everything goes to the info stream
        if record.levelno > logging.INFO and self.multiplex:
            target = self.err_handler
        else:
            target = self.info_handler
        target.emit(record)
        # ignore broken pipes, this often happens when cleaning up and exiting
        try:
            target.flush()
        except BrokenPipeError:
            pass

    def flush(self):
        try:
            self.err_handler.flush()
            self.info_handler.flush()
        except BrokenPipeError:
            pass

    def close(self):
        self.err_handler.close()
        self.info_handler.close()

    def setFormatter(self, fmt):
        # both sub-handlers share a single formatter
        self.err_handler.setFormatter(fmt)
        self.info_handler.setFormatter(fmt)
class ConsoleColors():
    """ANSI colour/style escape codes for console output.

    Codes are only active when stdin is attached to a terminal; otherwise every
    attribute is an empty string so redirected logs stay clean. Text wrapped in
    the BEGIN/END markers is styled via colorize().
    """
    _enabled = sys.stdin.isatty()
    WARNING = '\033[93m' if _enabled else ''
    ERROR   = '\033[91m' if _enabled else ''
    BOLD    = '\033[1m'  if _enabled else ''
    DIM     = '\033[2m'  if _enabled else ''
    GREEN   = '\033[92m' if _enabled else ''
    ENDC    = '\033[0m'  if _enabled else ''

    BEGIN = "<COLORIZE>"
    END   = "</COLORIZE>"

    @staticmethod
    def colorize(msg, *styles):
        """Replace the BEGIN marker with the concatenated *styles* and the END
        marker with the reset code (both are stripped when no style is given)."""
        style = "".join(styles)
        terminator = ConsoleColors.ENDC if style else ""
        return msg.replace(ConsoleColors.BEGIN, style).replace(ConsoleColors.END, terminator)
class ColorizingFormatter(logging.Formatter):
    """This Formatter inserts color codes into the string according to severity"""

    def __init__(self, fmt=None, datefmt=None, style="%", default_color=None):
        super(ColorizingFormatter, self).__init__(fmt, datefmt, style)
        self._default_color = default_color or ""

    def format(self, record):
        # an explicit 'boldface' attribute on the record adds bold on top
        pieces = [ConsoleColors.BOLD] if hasattr(record, 'boldface') else []
        if hasattr(record, 'color'):
            # an explicit colour name on the record wins over severity
            pieces.append(getattr(ConsoleColors, record.color or "None", ""))
        elif record.levelno >= logging.ERROR:
            pieces.append(ConsoleColors.ERROR)
        elif record.levelno >= logging.WARNING:
            pieces.append(ConsoleColors.WARNING)
        style = "".join(pieces)
        text = super(ColorizingFormatter, self).format(record)
        return ConsoleColors.colorize(text, style or self._default_color)
class SelectiveFormatter(logging.Formatter):
    """Selective formatter: dispatches each record to the first formatter whose
    condition(record) is True, falling back to the default formatter."""

    def __init__(self, default_formatter, dispatch_list):
        # initialize the base Formatter so this object is fully usable anywhere
        # a logging.Formatter is expected (the original skipped this call)
        super(SelectiveFormatter, self).__init__()
        self._dispatch_list = dispatch_list
        self._default_formatter = default_formatter

    def format(self, record):
        # dispatch_list is a sequence of (condition, formatter) pairs
        for condition, formatter in self._dispatch_list:
            if condition(record):
                return formatter.format(record)
        else:
            return self._default_formatter.format(record)
| 3,242 | 34.637363 | 118 | py |
Stimela | Stimela-master/stimela/utils/xrun_poll.py | import select, traceback, subprocess, errno, re, time, logging, os, sys
# set non-zero for verbose poll() tracing
DEBUG = 0

from . import StimelaCabRuntimeError, StimelaProcessRuntimeError

# lazily-initialized module-level logger; populated by global_logger()
log = None
def get_stimela_logger():
    """Returns Stimela's logger, or None if no Stimela installed"""
    try:
        import stimela
    except ImportError:
        return None
    return stimela.logger()
def global_logger():
    """Returns Stimela logger if running in stimela, else inits a global logger"""
    # caches the result in the module-level `log` on first call
    global log
    if log is None:
        log = get_stimela_logger()
        if log is None:
            # no stimela => running payload inside a cab -- just use the global logger and make it echo everything to the console
            logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
            log = logging.getLogger()
    return log
class SelectPoller(object):
    """Poller class. Poor man's select.poll(). Damn you OS/X and your select.poll will-you-won'y-you bollocks"""

    def __init__ (self, log):
        # maps file descriptor -> (label, file object)
        self.fdlabels = {}
        self.log = log

    def register_file(self, fobj, label):
        self.fdlabels[fobj.fileno()] = label, fobj

    def register_process(self, po, label_stdout='stdout', label_stderr='stderr'):
        # watch both output streams of a subprocess.Popen object
        self.fdlabels[po.stdout.fileno()] = label_stdout, po.stdout
        self.fdlabels[po.stderr.fileno()] = label_stderr, po.stderr

    def poll(self, timeout=5, verbose=False):
        """Return a list of (label, fileobj) pairs ready to read, or [] on
        timeout. Interrupted system calls (EINTR) are retried, or turned into
        an empty result when a timeout was requested."""
        while True:
            try:
                to_read, _, _ = select.select(self.fdlabels.keys(), [], [], timeout)
                self.log.debug("poll(): ready to read: {}".format(to_read))
                # return on success or timeout
                return [self.fdlabels[fd] for fd in to_read]
            except (select.error, IOError) as ioerr:
                if verbose:
                    self.log.debug("poll() exception: {}".format(traceback.format_exc()))
                if hasattr(ioerr, 'args'):
                    err = ioerr.args[0]  # py2
                else:
                    err = ioerr.errno  # py3
                # catch interrupted system call -- return if we have a timeout, else
                # loop again
                if err == errno.EINTR:
                    if timeout is not None:
                        if verbose:
                            self.log.debug("poll(): returning")
                        return []
                    if verbose:
                        self.log.debug("poll(): retrying")
                else:
                    raise ioerr

    def unregister_file(self, fobj):
        if fobj.fileno() in self.fdlabels:
            del self.fdlabels[fobj.fileno()]

    def __contains__(self, fobj):
        return fobj.fileno() in self.fdlabels
class Poller(object):
    """Poller class. Wraps select.poll()."""

    def __init__ (self, log):
        self.fdlabels = {}
        self.log = log
        self._poll = select.poll()

    def _watch(self, fileobj, label):
        # remember the (label, file) pair and ask the kernel to watch for input
        fd = fileobj.fileno()
        self.fdlabels[fd] = label, fileobj
        self._poll.register(fd, select.POLLIN)

    def register_file(self, fobj, label):
        self._watch(fobj, label)

    def register_process(self, po, label_stdout='stdout', label_stderr='stderr'):
        self._watch(po.stdout, label_stdout)
        self._watch(po.stderr, label_stderr)

    def poll(self, timeout=5, verbose=False):
        """Return a list of (label, fileobj) pairs ready to read, or [] on timeout."""
        try:
            events = self._poll.poll(timeout*1000)  # select.poll() wants milliseconds
            if verbose:
                self.log.debug("poll(): ready to read: {}".format(events))
            return [self.fdlabels[fd] for (fd, _event) in events]
        except Exception:
            if verbose:
                self.log.debug("poll() exception: {}".format(traceback.format_exc()))
            raise

    def unregister_file(self, fobj):
        fd = fobj.fileno()
        if fd in self.fdlabels:
            self._poll.unregister(fd)
            del self.fdlabels[fd]

    def __contains__(self, fobj):
        return fobj.fileno() in self.fdlabels
def _remove_ctrls(msg):
ansi_escape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', msg)
def xrun_nolog(command, name=None):
    """Run *command* through the shell without the stimela logging machinery.

    Args:
        command: shell command string
        name: label used in log messages (defaults to the command's first word)

    Returns:
        0 on success.

    Raises:
        StimelaProcessRuntimeError: on a non-zero exit code.
        KeyboardInterrupt / Exception: re-raised after logging.
    """
    log = global_logger()
    name = name or command.split(" ", 1)[0]
    try:
        log.info("# running {}".format(command))
        status = subprocess.call(command, shell=True)
    except KeyboardInterrupt:
        log.error("# {} interrupted by Ctrl+C".format(name))
        raise
    except Exception as exc:
        # format_exc() returns a single string; split it into lines
        # (the original iterated the string character by character)
        for line in traceback.format_exc().splitlines():
            log.error("# {}".format(line.strip()))
        log.error("# {} raised exception: {}".format(name, str(exc)))
        raise
    if status:
        raise StimelaProcessRuntimeError("{} returns error code {}".format(name, status))
    return 0
def xrun(command, options, log=None, logfile=None, env=None, timeout=-1, kill_callback=None, output_wrangler=None):
    """Run *command* with *options* through the shell, streaming its stdout and
    stderr to the stimela logger line by line.

    stdout lines are logged at INFO, stderr lines at WARNING; an optional
    *output_wrangler*(line, severity, log) callable may rewrite each line and
    its severity (returning line=None suppresses it). If *timeout* > 0 the
    process is killed (via *kill_callback* when given) once exceeded.

    Returns the exit status (0); raises StimelaCabRuntimeError on a non-zero
    exit code, SystemExit, Ctrl+C or any other failure.
    """
    command_name = command
    # this part could be inside the container
    command = " ".join([command] + list(map(str, options)))
    log = log or get_stimela_logger()
    if log is None:
        return xrun_nolog(command, name=command_name)
    # this part is never inside the container
    import stimela
    log = log or stimela.logger()

    log.info("running " + command, extra=dict(stimela_subprocess_output=(command_name, "start")))

    start_time = time.time()

    proc = subprocess.Popen([command], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            env=env, bufsize=1, universal_newlines=True, shell=True)

    poller = Poller(log=log)
    poller.register_process(proc)

    proc_running = True

    try:
        # keep reading until the process closes both of its output streams
        while proc_running and poller.fdlabels:
            fdlist = poller.poll(verbose=DEBUG>0)
            # print(f"fdlist is {fdlist}")
            for fname, fobj in fdlist:
                try:
                    line = fobj.readline()
                except EOFError:
                    line = b''
                # print("read {} from {}".format(line, fname))
                empty_line = not line
                line = (line.decode('utf-8') if type(line) is bytes else line).rstrip()
                # break out if process closes
                if empty_line:
                    # EOF on this stream: stop watching it; once both streams
                    # are closed the process is considered done
                    poller.unregister_file(fobj)
                    if proc.stdout not in poller and proc.stderr not in poller:
                        log.debug("The {} process has exited".format(command))
                        proc_running = None
                        break
                    continue
                # dispatch output to log
                line = _remove_ctrls(line)
                severity = logging.WARNING if fobj is proc.stderr else logging.INFO
                stream_name = "stderr" if fobj is proc.stderr else "stdout"
                # feed through wrangler to adjust severity and content
                if output_wrangler is not None:
                    line, severity = output_wrangler(line, severity, log)
                if line is not None:
                    log.log(severity, line, extra=dict(stimela_subprocess_output=(command_name, stream_name)))
            if timeout > 0 and time.time() > start_time + timeout:
                log.error("timeout, killing {} process".format(command))
                kill_callback() if callable(kill_callback) else proc.kill()
                proc_running = False

        proc.wait()
        status = proc.returncode

    except SystemExit as exc:
        log.error("{} has exited with code {}".format(command, exc.code))
        proc.wait()
        status = exc.code
        raise StimelaCabRuntimeError('{}: SystemExit with code {}'.format(command_name, status))
    except KeyboardInterrupt:
        if callable(kill_callback):
            log.warning("Ctrl+C caught: shutting down {} process, please give it a few moments".format(command_name))
            kill_callback()
            log.info("the {} process was shut down successfully".format(command_name),
                     extra=dict(stimela_subprocess_output=(command_name, "status")))
        else:
            log.warning("Ctrl+C caught, killing {} process".format(command_name))
            proc.kill()
        proc.wait()
        raise StimelaCabRuntimeError('{} interrupted with Ctrl+C'.format(command_name))
    except Exception as exc:
        traceback.print_exc()
        log.error("Exception caught: {}".format(str(exc)))
        proc.wait()
        raise StimelaCabRuntimeError("{} throws exception '{}'".format(command_name, str(exc)))

    if status:
        raise StimelaCabRuntimeError("{} returns error code {}".format(command_name, status))

    return status
| 8,781 | 37.017316 | 129 | py |
Stimela | Stimela-master/stimela/utils/__init__.py | import os
import sys
import json
import yaml
import time
import tempfile
import inspect
import warnings
import re
import math
import codecs
class StimelaCabRuntimeError(RuntimeError):
    """Raised when a cab (containerized task) fails at runtime."""
    pass
class StimelaProcessRuntimeError(RuntimeError):
    """Raised when a subprocess launched by stimela exits with an error code."""
    pass
# default number of CPUs made available to tasks; callers may override this
CPUS = 1

from .xrun_poll import xrun
def assign(key, value):
    """Bind *value* to name *key* in the CALLER's module globals.

    NOTE(review): this uses frame introspection to reach into the calling
    frame's globals -- fragile, and invisible to static analysis; confirm the
    callers genuinely need this side channel.
    """
    frame = inspect.currentframe().f_back
    frame.f_globals[key] = value
def readJson(conf):
    """Load the file *conf* and return its parsed contents.

    Despite the name, the file is parsed with yaml.safe_load, so both YAML and
    plain JSON documents are accepted.
    """
    with open(conf, "r") as _std:
        jdict = yaml.safe_load(_std)
    return jdict
def writeJson(config, dictionary):
    """Serialize *dictionary* as JSON (UTF-8, non-ASCII preserved) into file *config*."""
    payload = json.dumps(dictionary, ensure_ascii=False)
    with codecs.open(config, 'w', 'utf8') as out_file:
        out_file.write(payload)
def get_Dockerfile_base_image(image):
    """Return the last FROM line of a Dockerfile.

    *image* may be the Dockerfile itself or a directory containing one. The
    line is returned verbatim (including its trailing newline); an empty
    string is returned when no FROM line exists.
    """
    dockerfile = image if os.path.isfile(image) else "{:s}/Dockerfile".format(image)
    _from = ""
    with open(dockerfile, "r") as fh:
        for line in fh:
            if line.startswith("FROM"):
                _from = line
    return _from
def change_Dockerfile_base_image(path, _from, label, destdir="."):
    """Copy a cab's Dockerfile (and src tree) into a fresh temp directory,
    replacing its FROM line(s) with *_from*.

    Args:
        path: Dockerfile path or directory containing one
        _from: replacement FROM line (e.g. "FROM myimage:tag")
        label: used in the temporary directory's name prefix
        destdir: where the temporary build directory is created

    Returns:
        (temp_dir, dockerfile): the new build directory and Dockerfile path.
    """
    if os.path.isfile(path):
        dockerfile = path
        dirname = os.path.dirname(path)
    else:
        dockerfile = "{:s}/Dockerfile".format(path)
        dirname = path
    with open(dockerfile, "r") as std:
        lines = std.readlines()
    # drop every FROM line. (The original removed items from the list while
    # iterating it, which skips the element following each removal.)
    lines = [line for line in lines if not line.startswith("FROM")]
    temp_dir = tempfile.mkdtemp(
        prefix="tmp-stimela-{:s}-".format(label), dir=destdir)
    xrun(
        "cp", ["-r", "{:s}/Dockerfile {:s}/src".format(dirname, dirname), temp_dir])

    dockerfile = "{:s}/Dockerfile".format(temp_dir)
    with open(dockerfile, "w") as std:
        # new base image first, then the remainder of the original file
        std.write("{:s}\n".format(_from))
        for line in lines:
            std.write(line)
    return temp_dir, dockerfile
def get_base_images(logfile, index=1):
    """Extract (image, base) pairs recorded in a build log.

    The log may contain several build sections separated by a DONE banner;
    *index* (1-based) selects which section to scan. Lines announcing a base
    image look like "... <=BASE_IMAGE=> image=base".
    """
    with open(logfile, "r") as fh:
        contents = fh.read()
    separator = "[================================DONE==========================]"
    section = contents.split(separator)[index-1]
    marker = "<=BASE_IMAGE=>"
    pairs = []
    for line in section.split("\n"):
        if line.find(marker) > 0:  # the marker must not start the line
            payload = line.split(marker)[-1]
            image, base = payload.split("=")
            pairs.append((image.strip(), base))
    return pairs
def substitute_globals(string, globs=None):
    """Substitute "${name}" placeholders in *string* with values from *globs*
    (defaulting to the CALLER's module globals via frame introspection).

    Returns the substituted string, or False when no placeholders are found.
    """
    # collect names appearing inside {...}
    sub = set(re.findall('\{(.*?)\}', string))
    globs = globs or inspect.currentframe().f_back.f_globals
    if sub:
        for item in map(str, sub):
            # NOTE(review): the regex matches bare "{name}" but the replacement
            # targets "${name}" -- a "{name}" placeholder without the "$" is
            # detected yet never substituted; confirm "${name}" is the intended
            # placeholder syntax
            string = string.replace("${%s}" % item, globs[item])
        return string
    else:
        # no placeholders at all: callers get False, not the original string
        return False
| 2,647 | 21.827586 | 84 | py |
Stimela | Stimela-master/stimela/cargo/__init__.py | import os
# directory this package is installed in
ekhaya = os.path.dirname(__file__)

# Path to base images
BASE_PATH = "{:s}/base".format(ekhaya)

# Path to executor images
CAB_PATH = "{:s}/cab".format(ekhaya)

# Path to config templates
CONFIG_TEMPLATES = "{:s}/configs".format(ekhaya)
| 249 | 18.230769 | 48 | py |
Stimela | Stimela-master/stimela/cargo/base/__init__.py | 0 | 0 | 0 | py | |
Stimela | Stimela-master/stimela/cargo/cab/__init__.py | # -*- coding: future_fstrings -*-
import stimela
from stimela import utils, recipe
import logging
import os
import sys
import textwrap
from stimela.pathformatter import pathformatter, placeholder
from stimela.exceptions import *
import time
# mapping from the dtype strings used in cab parameter files to Python types
TYPES = {
    "str": str,
    "float": float,
    "bool": bool,
    "int": int,
    "list": list,
}

# candidate in-container mount points; adjusted below if they already exist
__vol = {
    "home" : "/stimela_home",
    "mount": "/stimela_mount",
}

# if a candidate path already exists on the host, append a timestamp suffix
# until a non-existing path is found
for item in list(__vol.keys()):
    val = __vol[item]
    while os.path.exists(val):
        __timestamp = str(time.time()).replace(".", "")
        val = "{0:s}-{1:s}".format(val.split("-")[0], __timestamp)
    __vol[item] = val

HOME = __vol["home"]
MOUNT = __vol["mount"]

# in-container destination directory for each io location
IODEST = {
    "input": f"{MOUNT}/input",
    "output": f"{MOUNT}/output",
    "msfile": f"{MOUNT}/msdir",
    "tmp": f"{MOUNT}/output/tmp",
}
class Parameter(object):
    """A single cab parameter: name, accepted type(s), io semantics and value."""

    def __init__(self, name, dtype, info,
                 default=False,
                 required=False,
                 choices=None,
                 io=None,
                 mapping=None,
                 check_io=True,
                 deprecated=False,
                 positional=False):
        # name: parameter name as exposed to recipes
        # dtype: a dtype string (or list of them), resolved via get_type()
        # io: "input"/"output"/"msfile" for file parameters, else None
        # mapping: alternative name the underlying binary expects
        # check_io: whether input files must exist at validation time
        self.name = name
        self.io = io
        if not isinstance(dtype, (list, tuple)):
            dtype = [dtype]
        self.dtype = []
        for item in dtype:
            tmp = self.get_type(item)
            self.dtype.append(tmp)
        self.info = info
        self.default = default
        self.required = required
        self.choices = choices or []
        self.mapping = mapping
        self.check_io = check_io
        self.deprecated = deprecated
        self.positional = positional
        # the validated value; set later by CabDefinition.update()
        self.value = None

    def __iter__(self):
        # iterating a Parameter yields its attribute *names* (used for display)
        for x in ["info", "default", "positional", "required", "choices", "mapping",
                  "check_io", "value", "name", "io", "dtype"]:
            yield x

    def __getitem__(self, v):
        # allow dict-style access to attributes: param["name"], etc.
        return getattr(self, v)

    def validate(self, value):
        """Return True if *value* matches the choices list and one of the
        accepted dtypes; raise StimelaCabParameterError otherwise."""
        if self.choices and value not in self.choices:
            raise StimelaCabParameterError("Parameter '{0}', can only be either of {1}".format(
                self.name, self.choices))
        for item in self.dtype:
            if isinstance(item, tuple):
                # list dtype, stored as (list, element_type)
                l, t = item
                if t == "file":
                    return True
                # note: a bare scalar of the element type is also accepted here
                if isinstance(value, t):
                    return True
                elif isinstance(value, list):
                    types = (t,int) if t is float else (t,)  # float permits ints as well
                    if all(isinstance(x, types) for x in value):  # check that all elements are of permitted type
                        return True
            elif item == "file":
                return True
            elif isinstance(value, tuple([item]+[int] if item is float else [item])):
                return True
        raise StimelaCabParameterError("Expecting any of types {0} for parameter '{1}', but got '{2}'".format(
            self.dtype, self.name, type(value).__name__))

    def get_type(self, dtype):
        """Resolve a dtype string to a Python type, the string "file", or a
        (list, element_type) tuple for "list:..." dtypes."""

        def _type(a):
            if a == "file":
                # file parameters must declare where they live
                if self.io not in ["input", "output", "msfile"]:
                    raise StimelaCabParameterError("io '{0}' for parameter '{1}' not understood. Please specify 'io' as either 'input', 'output' or 'msfile'".format(
                        self.io, self.name))
                return "file"
            else:
                return TYPES[a]

        if dtype.startswith("list:"):
            val = dtype.split(":")
            if len(val) != 2:
                raise StimelaCabParameterError(
                    "The type of '{0}' could not validate. Specify list types as \"list:dtype\" where dtype is normal type")
            ttype = val[1]
            return (list, _type(ttype))
        else:
            return _type(dtype)
class CabDefinition(object):
    """In-memory representation of a cab (containerized task) definition:
    identity (task/base/binary), its parameters, and validation logic."""

    def __init__(self,
                 indir=None,  # input directory
                 outdir=None,  # output directory
                 msdir=None,  # MS directory
                 parameter_file=None,
                 task=None,
                 base=None,
                 binary=None,
                 description=None,
                 tag=[],
                 prefix=None,
                 parameters=[],
                 version=[],
                 junk=[]):
        # Either load everything from *parameter_file* (the usual path), or
        # take the identity fields directly from keyword arguments.
        self.indir = indir
        self.outdir = outdir
        if parameter_file:
            cab = utils.readJson(parameter_file)
            # normalize tag/version to lists for multi-version cabs
            if not isinstance(cab["tag"], list):
                tag = [cab["tag"]]
                version = [cab.get("version", "x.x.x")]
            else:
                tag = cab["tag"]
                version = cab["version"]
            self.task = cab["task"]
            self.base = cab["base"]
            self.binary = cab["binary"]
            self.tag = tag
            self.junk = cab.get("junk", [])
            self.wranglers = cab.get("wranglers", [])
            self.version = version
            # a falsy "msdir" in the file means the cab does not use an MS dir
            if cab["msdir"]:
                self.msdir = msdir
            else:
                self.msdir = None
            self.description = cab["description"]
            self.prefix = cab["prefix"]
            parameters0 = cab["parameters"]
            self.parameters = []
            for param in parameters0:
                # "default" wins over legacy "value" as the default source
                default = param.get("default", param.get("value", None))
                addme = Parameter(name=param["name"],
                                  dtype=param["dtype"],
                                  io=param.get("io", None),
                                  info=param.get(
                                      "info", None) or "No documentation. Bad! Very bad...",
                                  default=default,
                                  mapping=param.get("mapping", None),
                                  required=param.get("required", False),
                                  positional=param.get("positional", False),
                                  choices=param.get("choices", False),
                                  check_io=param.get("check_io", True),
                                  deprecated=param.get("deprecated", False))
                self.parameters.append(addme)
        else:
            self.task = task
            self.base = base
            self.binary = binary
            self.prefix = prefix
            self.parameters = parameters
            self.description = description
            self.msdir = msdir
            self.tag = tag
            self.version = version
            self.junk = []
            self.wranglers = []
        self.log = stimela.logger()

    def __str__(self):
        """Human-readable dump of the cab and all its parameters."""
        res = ""
        res += "Cab definition for {}\n".format(self.task)
        for b in ["base", "binary", "prefix", "description", "tag", "version", "junk", "wranglers"]:
            res += "\t {}: {}\n".format(b, getattr(self, b))
        res += "\t Parameters:\n"
        for p in self.parameters:
            res += "\t\t {}:\n".format(p.name)
            # iterating a Parameter yields its attribute names
            for k in p:
                res += "\t\t\t {}: {}\n".format(k, str(p[k]))
        return res

    def display(self, header=False):
        """Pretty-print the cab (and, unless *header*, its parameters),
        wrapping text to 3/4 of the terminal width.

        NOTE(review): requires a real terminal -- the width comes from
        `stty size`; also int(cols)*3/4 is a float under Python 3, confirm
        textwrap.wrap accepts it on the supported versions.
        """
        rows, cols = os.popen('stty size', 'r').read().split()
        lines = textwrap.wrap(self.description, int(cols)*3/4)
        print("Cab {0} version {1}".format(self.task, self.version))
        print("Info {}".format(lines[0]))
        for line in lines[1:]:
            print(" {}".format(line))
        if header:
            print(" ")
            return
        print("Base Image {0}:{1}".format(self.base, self.tag))
        print("\n")
        print("Parameters:")
        rows, cols = os.popen('stty size', 'r').read().split()
        for param in self.parameters:
            # render the accepted dtypes as e.g. "str/list:file"
            _types = ""
            for i, _type in enumerate(param.dtype):
                if isinstance(_type, tuple):
                    _name = "list:{}".format(
                        "file" if _type[1] == "file" else _type[1].__name__)
                else:
                    _name = "file" if _type == "file" else _type.__name__
                _types += "{}".format(_name) if i == 0 else "/{}".format(_name)
            lines = textwrap.wrap(param.info, int(cols)*3/4)
            print(" Name {}{}".format(param.name,
                                      "/{}".format(param.mapping) if param.mapping else ""))
            print(" Description {}".format(lines[0]))
            for line in lines[1:]:
                print(" {}".format(line))
            print(" Type {}".format(_types))
            print(" Default {}".format(param.default))
            if param.choices:
                print(" Choices {}".format(param.choices))
            print(" ")

    def toDict(self):
        """Serialize the cab (identity + parameters with current values) into
        a plain dict suitable for writing as the in-container config file."""
        conf = {}
        for item in "task base binary msdir description prefix tag version junk wranglers".split():
            if item == 'msdir':
                conf[item] = getattr(self, item, False)
            else:
                conf[item] = getattr(self, item)

        conf["parameters"] = []
        for param in self.parameters:
            # list dtypes: wrap a bare scalar value into a one-element list
            if isinstance(param.dtype[0], tuple):
                if not isinstance(param.value, (list, tuple)) and param.value is not None:
                    param.value = [param.value]
            _types = ""
            for i, _type in enumerate(param.dtype):
                if isinstance(_type, tuple):
                    _name = "list:{}".format(
                        "file" if _type[1] == "file" else _type[1].__name__)
                else:
                    _name = "file" if _type == "file" else _type.__name__
                _types += "{}".format(_name) if i == 0 else "/{}".format(_name)
            conf["parameters"].append(
                {
                    # the binary sees the mapped name when a mapping is defined
                    "name": param.mapping or param.name,
                    "dtype": _types,
                    "info": param.info,
                    "required": param.required,
                    "positional": param.positional,
                    "check_io": param.check_io,
                    "value": param.default if param.value is None else param.value
                })
        return conf

    def update(self, options, saveconf, tag=None):
        """Validate user-supplied *options* against this cab's parameters,
        rewrite io values to their in-container paths, and write the resulting
        configuration to *saveconf*.

        Raises StimelaCabParameterError for missing required parameters,
        unknown parameters, type mismatches, unknown io locations, or missing
        input files (when check_io is set).
        """
        required = filter(lambda a: a.required, self.parameters)
        tag = tag or self.tag
        for param0 in required:
            if param0.name not in options.keys() and param0.mapping not in options.keys():
                raise StimelaCabParameterError(
                    "Parameter {} is required but has not been specified".format(param0.name))

        self.log.info(f"Validating parameters for cab {self.task} ({self.base}:{tag})")
        for name, value in options.items():
            found = False
            for param in self.parameters:
                if name in [param.name, param.mapping]:
                    found = True
                    if param.deprecated:
                        self.log.warning(f"Parameter {name} for cab {self.task} is deprecated, and will be removed in a future release")
                    if param.io:
                        if value is None:
                            continue
                        param.validate(value)
                        param.value = []
                        if not isinstance(value, (list, tuple)):
                            value = [value]
                        for _value in value:
                            if isinstance(_value, pathformatter):
                                # pathformatter entries are expanded into a
                                # single string with each placeholder replaced
                                # by its in-container destination directory
                                if param.check_io:
                                    raise StimelaCabParameterError("Pathformatters cannot be used on io parameters where io has to be checked")
                                joinlist = _value()  # construct placeholder list
                                joined_str = ""
                                for p in joinlist:
                                    if not isinstance(p, placeholder):
                                        joined_str += p
                                    else:
                                        if p() not in IODEST.keys():
                                            raise StimelaCabParameterError('The location \'{0}\' specified for parameter \'{1}\', is unknown. Choices are {2}'.format(
                                                p(), param.name, IODEST.keys()))
                                        location = p()
                                        if location in ["input", "msfile"]:
                                            if location == "input" and self.indir is None:
                                                raise StimelaCabParameterError(
                                                    "You have specified input files, but have not specified an input folder")
                                            if location == "msfile" and self.msdir is None:
                                                raise StimelaCabParameterError(
                                                    "You have specified MS files, but have not specified an MS folder")
                                            joined_str += "{0}/".format(IODEST[location])
                                        else:
                                            if self.outdir is None:
                                                raise StimelaCabParameterError(
                                                    "You have specified output files, but have not specified an output folder")
                                            joined_str += "{0}/".format(IODEST[location])
                                param.value.append(joined_str)
                            elif isinstance(_value, str):
                                # plain strings may carry a "name:location"
                                # suffix overriding the parameter's default io
                                val = _value.split(":")
                                if len(val) == 2:
                                    if val[1] not in IODEST.keys():
                                        raise StimelaCabParameterError('The location \'{0}\' specified for parameter \'{1}\', is unknown. Choices are {2}'.format(
                                            val[1], param.name, IODEST.keys()))
                                    self.log.info("Location of '{0}' was specified as '{1}'. Will overide default.".format(
                                        param.name, val[1]))
                                    _value = val[0]
                                    location = val[1]
                                else:
                                    location = param.io
                                if location in ["input", "msfile"]:
                                    if location == "input" and self.indir is None:
                                        raise StimelaCabParameterError(
                                            "You have specified input files, but have not specified an input folder")
                                    if location == "msfile" and self.msdir is None:
                                        raise StimelaCabParameterError(
                                            "You have specified MS files, but have not specified an MS folder")
                                    # input/MS files must exist on the host side
                                    path = "{0}/{1}".format(self.indir if location ==
                                                            "input" else self.msdir, _value)
                                    if param.check_io and not os.path.exists(path):
                                        raise StimelaCabParameterError("File '{0}' for parameter '{1}' could not be located at '{2}'.".format(
                                            _value, param.name, path))
                                    param.value.append(
                                        "{0}/{1}".format(IODEST[location], _value))
                                else:
                                    if self.outdir is None:
                                        raise StimelaCabParameterError(
                                            "You have specified output files, but have not specified an output folder")
                                    param.value.append(
                                        "{0}/{1}".format(IODEST[location], _value))
                            else:
                                raise StimelaCabParameterError("io parameter must either be a pathformatter object or a string")
                        # single-element lists collapse back to a scalar
                        if len(param.value) == 1:
                            param.value = param.value[0]
                    else:  # not io type
                        if isinstance(value, pathformatter):
                            raise StimelaCabParameterError("Path formatter type specified, but {} is not io".format(param.name))
                        self.log.debug(
                            "Validating parameter {}".format(param.name))
                        param.validate(value)
                        param.value = value
            if not found:
                raise StimelaCabParameterError(
                    "Parameter {0} is unknown. Run 'stimela cabs -i {1}' to get help on this cab".format(name, self.task))
        conf = {}
        conf.update(self.toDict())
        utils.writeJson(saveconf, conf)
        self.log.info(f"Parameters validated and saved to {saveconf}")
| 17,365 | 42.415 | 166 | py |
Stimela | Stimela-master/stimela/cargo/cab/update_casa_version_tag.py | import json
import glob
from collections import OrderedDict
# Batch-update every casa_* cab definition with the supported CASA versions,
# matching image tags, and a junk entry for the task's *.last file.
for cabfile in glob.glob("casa_*/*.json"):
    print(cabfile)
    # OrderedDict preserves the original key order of the JSON file
    with open(cabfile, 'rb') as stdr:
        cabdict = json.load(stdr, object_pairs_hook=OrderedDict)
    cabdict["version"] = ["4.7.2", "5.6.1-8", "5.8.0"]
    cabdict["tag"] = ["0.3.0-2", "1.6.3", "1.7.1"]
    cabdict["junk"] = ["%s.last" % (cabdict["binary"])]
    with open(cabfile, 'w') as stdw:
        json.dump(cabdict, stdw, indent=2)
| 469 | 30.333333 | 64 | py |
Stimela | Stimela-master/stimela/cargo/cab/aimfast/src/run.py | import os
import sys
import shlex
import shutil
import subprocess
import yaml
import glob
# Environment contract: the stimela runner passes the parameter-file path and
# the in-container volume paths through environment variables.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]

with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]

args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    if name in ['compare-images', 'compare-residuals', 'compare-models']:
        # these options take their list values two at a time
        compare = []
        for i, val in enumerate(value):
            compare.append(val)
            # Compare models/images in pairs
            if i%2:
                args += ['{0}{1} {2}'.format(cab['prefix'],
                                             name, " ".join(compare))]
                compare = []
    elif name in ['compare-online']:
        # one option instance per list element
        for val in value:
            args += ['{0}{1} {2}'.format(cab['prefix'], name, val)]
    elif name in ['compare-residual-subimages', 'centre-pixels-size']:
        # whole list passed as one space-separated option value
        args += ['{0}{1} {2}'.format(cab['prefix'],
                                     name, " ".join(value))]
    elif param['dtype'] in ['bool']:
        # NOTE(review): a bool explicitly set to False still emits the flag;
        # confirm upstream validation never passes False here
        args += ['{0}{1}'.format(cab['prefix'], name)]
    else:
        args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]

_runc = " ".join([cab["binary"]]+ args)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # always clean up declared junk, even if the task failed
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,759 | 28.333333 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/cubical_ddf/src/run.py | import os
import sys
import shlex
import shutil
import subprocess
import glob
import yaml
# Environment contract: the stimela runner passes the parameter-file path and
# the in-container volume paths through environment variables.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]

with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]

args = {}
parset = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        value = 0  # CubiCal expects 0/1 rather than False/True
    elif value is True:
        value = 1
    elif name == 'parset':
        # the parset file is a positional argument, not a --option
        parset = [value]
        continue
    elif isinstance(value, list):
        value = ",".join(map(str, value))
    args[name] = value

# available jones terms
joneses = "g b dd".split()
soljones = args.pop("sol-jones")
# drop the per-term options of every jones term that is not being solved for
# (substring match against the sol-jones specification)
for jones in joneses:
    if jones.lower() not in soljones.lower():
        jopts = filter(lambda a: a.startswith(
            "{0:s}-".format(jones)), args.keys())
        # list() fully materializes jopts before args is modified below
        for item in list(jopts):
            del args[item]

opts = ["{0:s}sol-jones {1:s}".format(cab["prefix"], soljones)] + \
    ['{0}{1} {2}'.format(cab['prefix'], name, value)
     for name, value in args.items()]

_runc = " ".join([cab["binary"]] + parset + opts)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # always clean up declared junk, even if the task failed
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,630 | 23.343284 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/sofia/src/run.py | import os
# Stimela cab runner for SoFiA: translate cab parameters into a SoFiA
# parameter file, run the SoFiA pipeline, then delete declared junk.
import sys
import Tigger
import numpy
import tempfile
import json
import codecs
import shlex
import shutil
import glob
import subprocess
from astLib.astWCS import WCS
from Tigger.Models import SkyModel, ModelClasses
# Standard Stimela mount points, injected into the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with codecs.open(CONFIG, "r", "utf8") as stdr:
    cab = json.load(stdr)
junk = cab["junk"]
args = []
msname = None
# SoFiA is driven by a key=value parameter file rather than CLI options.
sofia_file = 'sofia_parameters.par'
wstd = open(sofia_file, 'w')
wstd.write('writeCat.outputDir={:s}\n'.format(OUTPUT))
# Flags extracted from the parameter stream as it is written out.
port2tigger = False
image = None
writecat = False
parameterise = False
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    if name == "port2tigger":
        # Stimela-only switch; not written to the SoFiA parameter file.
        port2tigger = value
        continue
    if name == "steps.doWriteCat":
        writecat = value
    if name == "steps.doParameterise":
        parameterise = value
    if name == "import.inFile":
        image = value
    wstd.write('{0}={1}\n'.format(name, value))
wstd.close()
_runc = " ".join(['sofia_pipeline.py', sofia_file])
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the pipeline failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
| 1,606 | 21.319444 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_flagmanager/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/shadems/src/run.py | import os
# Stimela cab runner for shadems: build the shadems command line from
# the cab parameters, point its output at OUTPUT, then clean up junk.
import sys
import shlex
import yaml
import subprocess
import json
import shutil
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    if name == "ms":
        # The measurement set is a positional argument, appended last.
        ms = value
        continue
    elif name in ["debug", "iter-scan", "iter-field", "iter-corr", "iter-spw", "iter-antenna", "noconj", "noflags", "profile"]:
        # Boolean switches take no value on the command line.
        value = ""
    if isinstance(value, list):
        # Lists become space-separated values after the option name.
        val = map(str, value)
        args += ['{0}{1} {2}'.format(cab['prefix'], name, " ".join(val))]
        continue
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]] + args + ["--dir", OUTPUT] + [ms])
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
| 1,422 | 25.351852 | 127 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_gencal/src/run.py | # -*- coding: future_fstrings -*-
# Stimela cab runner for CASA gencal: run the task via Crasa, then
# verify that every requested field received solutions in the gaintable.
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
from casacore.table import table
import os
import numpy
print(f"Running CASA task '{config.binary}'")
args = parameters_dict
task = crasa.CasaTask(config.binary, **args)
task.run()
# Sanity-check the output gaintable.
gtab = args["caltable"]
if not os.path.exists(gtab):
    raise RuntimeError(f"The gaintable was not created. Please refer to CASA {config.binary} logfile for further details")
# Field IDs that actually have solutions in the table.
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
# Names of all fields, indexed by field ID.
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
# The "field" parameter may hold numeric IDs or field names; try IDs
# first, falling back to a name lookup.
field_in = args["field"].split(",")
try:
    ids = list(map(int, field_in))
except ValueError:
    ids = list(map(lambda a: field_names.index(a), field_in))
if not set(ids).issubset(field_ids):
    raise RuntimeError(f"Some field(s) do not have solutions after the calibration. Please refer to CASA {config.binary} logfile for further details")
| 974 | 26.083333 | 150 | py |
Stimela | Stimela-master/stimela/cargo/cab/rfinder/src/run.py | import os
import sys
import yaml
import rfinder
import glob
import subprocess
import shlex
import shutil

# Stimela cab runner for RFInder: overlay the cab parameters onto
# RFInder's packaged default YAML configuration, run "rfinder -c", and
# clean up declared junk afterwards.

CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]

with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]
args = []
msname = None

# Start from the default configuration shipped inside the rfinder package.
pkg_path = os.path.dirname(os.path.realpath(rfinder.__file__))
rfinder_file = '{:s}/rfinder_default.yml'.format(pkg_path)
with open(rfinder_file) as f:
    # Use safe_load: the config is plain data, and yaml.load without an
    # explicit Loader is deprecated and unsafe (arbitrary object
    # construction). Also matches the safe_load used on CONFIG above.
    list_doc = yaml.safe_load(f)

list_doc['general']['outdir'] = '{:s}/'.format(OUTPUT)
list_doc['general']['workdir'] = '{:s}/'.format(MSDIR)

# Graft each cab parameter onto the (up to two levels deep) config tree.
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    if name == 'msname':
        # Strip any directory component; workdir supplies the location.
        list_doc['general']['msname'] = value.split('/')[-1]
        continue
    for key, val in list_doc.items():
        if type(val) == dict:
            for k1, v1 in val.items():
                if type(v1) == dict:
                    for k2, v2 in v1.items():
                        if k2 == name:
                            list_doc[key][k1][k2] = value
                else:
                    if k1 == name:
                        list_doc[key][k1] = value
        else:
            if key == name:
                list_doc[key] = value

# Write the merged configuration and hand it to rfinder.
edited_file = 'rfinder_default.yml'
with open(edited_file, "w") as f:
    yaml.dump(list_doc, f)
_runc = 'rfinder -c %s' % edited_file
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
| 1,919 | 23.935065 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_mstransform/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_makemask/src/run.py | # -*- coding: future_fstrings -*-
# Stimela cab runner for CASA makemask. If a threshold is supplied, an
# intermediate immath run first builds a thresholded image, which is
# then used as the input mask for makemask.
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
makemask_args = {}
immath_args = {}
for name, value in parameters_dict.items():
    if value is None:
        continue
    # Parameters that feed the optional immath thresholding step.
    if name in ['threshold', 'inpimage', 'output']:
        if name in ['threshold']:
            # Keep pixels at/above the threshold, zero the rest.
            im_value = ' iif( IM0 >=%s, IM0, 0.0) ' % value
            im_name = 'expr'
        if name in ['output']:
            im_value = '%s_thresh' % value
            im_name = 'outfile'
        if name in ['inpimage']:
            im_value = value
            im_name = 'imagename'
        immath_args[im_name] = im_value
    # Parameters passed through to makemask itself.
    if name in ['mode', 'inpimage', 'inpmask', 'output', 'overwrite']:
        makemask_args[name] = value
if 'expr' in immath_args:
    # A threshold was supplied: build the thresholded image first.
    task = crasa.CasaTask("immath", **immath_args)
    task.run()
    if 'inpmask' not in makemask_args:
        makemask_args['inpmask'] = immath_args['outfile']
task = crasa.CasaTask(config.binary, **makemask_args)
task.run()
| 1,053 | 27.486486 | 70 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_bandpass/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
from casacore.tables import table
import numpy
import yaml
import glob
import shutil

# Stimela cab runner for CASA 4.7 bandpass: run the task via Crasa,
# clean up declared junk, then verify that at least one requested field
# actually received solutions in the output gaintable.

CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]

with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]
args = {}
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    args[name] = value

task = crasa.CasaTask(cab["binary"], **args)
try:
    task.run()
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
            # Leave other types

# Sanity-check the output gaintable.
gtab = args["caltable"]
if not os.path.exists(gtab):
    raise RuntimeError("The gaintable was not created. Please refer to CASA {0:s} logfile for further details".format(cab["binary"]))
# Field IDs that actually have solutions in the table.
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
# Names of all fields, indexed by field ID.
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
# The "field" parameter may hold numeric IDs or field names. The map()
# results must be materialised inside the try block: in Python 3 map()
# is lazy, so a ValueError from int() would otherwise only surface at
# set(ids) below, outside the except clause, and crash the script.
field_in = args["field"].split(",")
try:
    ids = list(map(int, field_in))
except ValueError:
    ids = list(map(lambda a: field_names.index(a), field_in))
if not set(ids).intersection(field_ids):
    raise RuntimeError("None of the fields has solutions after the calibration. Please refer to the CASA {} logfile for further details".format(cab["binary"]))
| 1,667 | 23.895522 | 159 | py |
Stimela | Stimela-master/stimela/cargo/cab/autoflagger/src/run.py | import os
# Stimela cab runner for the autoflagger binary: build the command line
# from the cab parameters (MS names are positional, appended last), run
# it, and clean up declared junk afterwards.
import sys
import shlex
import shutil
import subprocess
import yaml
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
msname = None
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    elif value is True:
        # Boolean switches take no value on the command line.
        value = ''
    elif name == 'msname':
        # One or more measurement sets, appended as positional arguments.
        if isinstance(value, str):
            msname = value
        else:
            msname = ' '.join(value)
        continue
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
if msname is None:
    raise RuntimeError('MS name has not be specified')
_runc = " ".join([cab['binary']] + args + [msname])
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
| 1,324 | 22.660714 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_applycal/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/tigger_convert/src/run.py | import os
# Stimela cab runner for tigger-convert: translate cab parameters into
# tigger-convert options (the input/output sky models are positional),
# run the conversion, and clean up declared junk afterwards.
import sys
import subprocess
import yaml
import glob
import shutil
import shlex
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
mslist = []
field = []
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value in [False, None]:
        continue
    if name == "primary-beam":
        # Quote the beam expression so shlex.split keeps it intact.
        value = "'{}'".format(value)
    if name == 'pa-range' and hasattr(value, '__iter__'):
        value = ','.join(value)
    if value is True:
        # Boolean switches take no value on the command line.
        value = ""
    if name == 'pa-from-ms' and hasattr(value, '__iter__'):
        # Collected here and combined with field-id below.
        mslist = value
        continue
    if name == 'field-id'and hasattr(value, '__iter__'):
        field = value
        continue
    # Positional arguments
    if name == 'input-skymodel':
        inlsm = value
        continue
    elif name == 'output-skymodel':
        outlsm = value
        continue
    args.append('{0}{1} {2}'.format(cab['prefix'], name, value))
if mslist:
    # Pair each MS with a field ID (default 0) as "ms:field" tokens.
    if len(field) == 0:
        field = [0]*len(mslist)
    pa_from_ms = ','.join(['{0}:{1}'.format(ms, i)
                           for ms, i in zip(mslist, field)])
    args.append('--pa-from-ms {}'.format(pa_from_ms))
_runc = " ".join([cab['binary']] + args + [inlsm, outlsm])
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the conversion failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,806 | 24.097222 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/aegean/src/run.py | import os
# Stimela cab runner for Aegean source finding: run the aegean binary on
# an image, then optionally convert the output catalogue into a Tigger
# sky model (the port2tigger switch).
import sys
import numpy
import Tigger
import tempfile
import pyfits
import shutil
import shlex
import subprocess
import yaml
import glob
from astLib.astWCS import WCS
from astropy.table import Table
from Tigger.Models import SkyModel, ModelClasses
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Parameters that control catalogue writing rather than the image run.
write_catalog = ['port2tigger', 'table']
write_opts = {}
img_opts = {}
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    if name == 'filename': # positional argument
        args += ['{0}'.format(value)]
    else:
        if name != "port2tigger":
            args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
    if name in write_catalog:
        write_opts[name] = value
    else:
        img_opts[name] = value
_runc = " ".join([cab["binary"]] + args)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the source finder failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
port2tigger = write_opts.pop('port2tigger')
outfile = write_opts.pop('table')
image = img_opts.pop('filename')
if port2tigger:
    write_opts['format'] = 'fits'
if not port2tigger:
    sys.exit(0)
# convert to data file to Tigger LSM
# First make dummy tigger model
tfile = tempfile.NamedTemporaryFile(suffix='.txt')
tfile.flush()
prefix = os.path.splitext(outfile)[0]
tname_lsm = prefix + ".lsm.html"
with open(tfile.name, "w") as stdw:
    stdw.write("#format:name ra_d dec_d i emaj_s emin_s pa_d\n")
model = Tigger.load(tfile.name)
tfile.close()
def tigger_src(src, idx):
    # Convert one Aegean catalogue row into a Tigger SkyModel source.
    name = "SRC%d" % idx
    flux = ModelClasses.Polarization(float(src["int_flux"]), 0, 0, 0,
                                     I_err=float(src["err_int_flux"]))
    ra, ra_err = map(numpy.deg2rad, (float(src["ra"]), float(src["err_ra"])))
    dec, dec_err = map(numpy.deg2rad, (float(src["dec"]),
                                       float(src["err_dec"])))
    pos = ModelClasses.Position(ra, dec, ra_err=ra_err, dec_err=dec_err)
    ex, ex_err = map(numpy.deg2rad, (float(src["a"]), float(src["err_a"])))
    ey, ey_err = map(numpy.deg2rad, (float(src["b"]), float(src["err_b"])))
    pa, pa_err = map(numpy.deg2rad, (float(src["pa"]), float(src["err_pa"])))
    if ex and ey:
        # Non-zero fitted axes: carry the Gaussian shape.
        shape = ModelClasses.Gaussian(
            ex, ey, pa, ex_err=ex_err, ey_err=ey_err, pa_err=pa_err)
    else:
        shape = None
    source = SkyModel.Source(name, pos, flux, shape=shape)
    # Adding source peak flux (error) as extra flux attributes for sources,
    # and to avoid null values for point sources I_peak = src["Total_flux"]
    if shape:
        source.setAttribute("I_peak", float(src["peak_flux"]))
        source.setAttribute("I_peak_err", float(src["err_peak_flux"]))
    else:
        source.setAttribute("I_peak", float(src["int_flux"]))
        source.setAttribute("I_peak_err", float(src["err_int_flux"]))
    return source
data = Table.read('{0}_comp.{1}'.format(outfile.split('.')[0], outfile.split('.')[-1]),
                  format=outfile.split('.')[-1])
for i, src in enumerate(data):
    model.sources.append(tigger_src(src, i))
# Centre the model on the image's WCS reference position.
wcs = WCS(image)
centre = wcs.getCentreWCSCoords()
model.ra0, model.dec0 = map(numpy.deg2rad, centre)
model.save(tname_lsm)
# Rename using CORPAT
subprocess.check_call(["tigger-convert", tname_lsm, "--rename", "-f"])
| 3,876 | 27.718519 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/rfimasker/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun

# Build the command line: the binary followed by the cab parameters.
# "ms" is a mandatory positional; a list of MSs is expanded into
# repeated positional arguments, while other list values are rejected.
cli_args = parse_parameters(
    repeat=None,
    positional=["ms"],
    mandatory=["ms"],
    repeat_dict=dict(ms=True),
)
command = [config.binary] + cli_args

# Execute, propagating failure as a non-zero exit status.
status = prun(command)
if status != 0:
    sys.exit(1)
| 463 | 28 | 78 | py |
Stimela | Stimela-master/stimela/cargo/cab/rmclean3d/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun

# Assemble the command line: list-valued parameters are expanded into
# repeated arguments; dirty-pdf and rmsf-fwhm are mandatory positionals.
cli_args = parse_parameters(
    repeat=True,
    positional=["dirty-pdf", "rmsf-fwhm"],
    mandatory=["dirty-pdf", "rmsf-fwhm"],
)
command = [config.binary] + cli_args

# Execute, propagating failure as a non-zero exit status.
status = prun(command)
if status != 0:
    sys.exit(1)
| 435 | 30.142857 | 118 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_plotms/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_uvcontsub/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_plotuv/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_importfits/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_immath/src/run.py | import os
# Stimela cab runner for CASA immath, with an optional "unstack" mode
# that runs immath once per channel to split a cube into per-channel
# images (optionally exporting each to FITS).
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Parameters consumed by this wrapper rather than passed to immath.
unstack_params = ['unstack', 'nchans', 'keep_casa_images', 'port2fits']
unstack_args = {}
immath_args = {}
def rm_fr(item):
    # Force-remove a file or directory tree.
    os.system('rm -fr {}'.format(item))
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    if name in unstack_params:
        unstack_args[name] = value
    else:
        immath_args[name] = value
# NOTE(review): this pops the key named after the cab binary, but the
# wrapper parameter above is called "unstack"; this only works if
# cab["binary"] is literally "unstack" -- confirm against the cab YAML.
unstack = unstack_args.pop(cab["binary"], False)
if not unstack:
    # Plain single immath run.
    task = crasa.CasaTask("immath", **immath_args)
    try:
        task.run()
    finally:
        # Always remove declared junk, even if the task failed.
        for item in junk:
            for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
                items = glob.glob("{dest}/{item}".format(**locals()))
                for f in items:
                    if os.path.isfile(f):
                        os.remove(f)
                    elif os.path.isdir(f):
                        shutil.rmtree(f)
else:
    # Unstack mode: run immath per channel for every input image.
    images = immath_args['imagename']
    for image in images:
        for i in range(unstack_args['nchans']):
            ext = image.split('.')[-1]
            chan_num = str(i)
            outfile = '{:s}-{:s}.{:s}'.format(
                immath_args['outfile'], chan_num, ext)
            run_immath_args = immath_args.copy()
            run_immath_args['imagename'] = image
            run_immath_args['outfile'] = outfile
            run_immath_args['chans'] = chan_num
            task = crasa.CasaTask(cab["binary"], **run_immath_args)
            task.run()
            if unstack_args['port2fits']:
                print('Converting CASA images to FITS images')
                fits = outfile + '.fits'
                task = crasa.CasaTask(
                    "exportfits", **dict(imagename=outfile, fitsimage=fits, overwrite=True))
                task.run()
                if not unstack_args['keep_casa_images']:
                    rm_fr(outfile)
| 2,223 | 28.263158 | 95 | py |
Stimela | Stimela-master/stimela/cargo/cab/catdagger/src/run.py | import os
# Stimela cab runner for CatDagger: build the command line (the noise
# map is positional; true booleans become bare flags), run it, and
# clean up declared junk afterwards.
import sys
import shlex
import shutil
import subprocess
import glob
import yaml
OUTPUT = os.environ["OUTPUT"]
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if name == "noise-map":
        # Positional argument.
        args += [value]
        continue
    if value is None:
        continue
    elif value is False:
        continue
    if param["dtype"] == "bool" and value:
        # True booleans are bare switches without a value.
        args += ['{0}{1}'.format(cab['prefix'], name)]
        continue
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]] + args)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,170 | 23.914894 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_tclean/src/run.py | import os
# Stimela cab runner for CASA tclean: optionally derive the cleaning
# threshold from a noise image, run tclean, export the CASA image
# products to FITS (port2fits), and optionally delete the CASA originals.
import sys
import logging
import Crasa.Crasa as crasa
import astropy.io.fits as pyfits
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    args[name] = value
# If a noise image is given: threshold = noise_sigma * std(noise image).
noise_image = args.pop('noise_image', False)
if noise_image:
    noise_sigma = args.pop('noise_sigma')
    noise_hdu = pyfits.open(noise_image)
    noise_data = noise_hdu[0].data
    noise_std = noise_data.std()
    threshold = noise_sigma*noise_std
    args['threshold'] = '{}Jy'.format(threshold)
else:
    args.pop('noise_sigma')
prefix = args['imagename']
# Wrapper-only switches, removed before handing args to tclean.
port2fits = args.pop('port2fits', True)
keep_casa_images = args.pop("keep_casa_images", False)
task = crasa.CasaTask(cab["binary"], **args)
try:
    task.run()
finally:
    # Always remove declared junk, even if tclean failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
# Collect the CASA image products to convert to FITS. With nterms > 1
# the products carry .tt<N> suffixes plus alpha/beta (error) maps.
nterms = args.get("nterms", 1)
images = ["flux", "model", "residual", "psf", "image", "mask", "pb", "sumwt"]
STD_IMAGES = images[:4]
convert = []
if port2fits:
    for image in images:
        img = "{:s}.{:s}".format(prefix, image)
        if image == 'flux':
            _images = [img]
        elif nterms > 1:
            _images = ["%s.tt%d" % (img, d) for d in range(nterms)]
            if image == "image":
                if nterms == 2:
                    alpha = img+".alpha"
                    alpha_err = img+".alpha.error"
                    _images += [alpha, alpha_err]
                if nterms == 3:
                    beta = img+".beta"
                    beta_err = img+".beta.error"
                    _images += [beta, beta_err]
        else:
            _images = [img]
        convert += _images
for _image in convert:
    sys.stdout.write(_image)
    # Missing standard products mean the clean itself failed.
    if _image in STD_IMAGES and (not os.path.exists(_image)):
        raise RuntimeError(
            "Standard output from CASA clean task not found. Something went wrong durring cleaning, take a look at the logs and such")
    elif os.path.exists(_image):
        task = crasa.CasaTask(
            "exportfits", **dict(imagename=_image, fitsimage=_image+".fits", overwrite=True))
        try:
            task.run()
        finally:
            # Junk cleanup is repeated after each exportfits call.
            for item in junk:
                for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
                    items = glob.glob("{dest}/{item}".format(**locals()))
                    for f in items:
                        if os.path.isfile(f):
                            os.remove(f)
                        elif os.path.isdir(f):
                            shutil.rmtree(f)
            # Leave other types
if not keep_casa_images:
    for _image in convert:
        os.system("rm -rf {0:s}".format(_image))
| 3,301 | 29.293578 | 134 | py |
Stimela | Stimela-master/stimela/cargo/cab/rmsynth1d/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun

# Assemble the command line: list-valued parameters are expanded into
# repeated arguments; dataFile is a mandatory positional.
cli_args = parse_parameters(
    repeat=True,
    positional=["dataFile"],
    mandatory=["dataFile"],
)
command = [config.binary] + cli_args

# Execute, propagating failure as a non-zero exit status.
status = prun(command)
if status != 0:
    sys.exit(1)
| 408 | 28.214286 | 90 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_plotuv/src/run.py | import os
# Stimela cab runner for CASA 4.7 plotuv: collect the cab parameters,
# run the task via Crasa, and clean up declared junk afterwards.
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
    task.run()
finally:
    # Always remove declared junk, even if the task failed.
    for item in junk:
        for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
    # Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_plotcal/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Announce which CASA task this cab is about to execute.
task_name = config.binary
print(f"Running CASA task '{task_name}'")

# Hand the cab parameters straight to Crasa and execute the task.
crasa.CasaTask(task_name, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/calibrator/src/run.py | import numpy
# Stimela cab runner for the MeqTrees stefcal calibrator: translate cab
# parameters into TDL option assignments (params), covering flag I/O,
# MS selection and the G/B/E/dE jones terms, before run_meqtrees below
# builds and executes the meqtree-pipeliner command.
import os
import sys
from pyrap.tables import table
import subprocess
import Cattery
from scabha import config, parameters_dict, prun, OUTPUT, log
CODE = os.path.join(os.environ["STIMELA_MOUNT"], "code")
CONFIG = os.environ["CONFIG"]
binary = config.binary
# Normalise cab parameters: drop unset values, map booleans to 0/1.
jdict = {}
for name, value in parameters_dict.items():
    if value is None:
        continue
    if value is False:
        value = 0
    elif value is True:
        value = 1
    jdict[name] = value
msname = jdict['msname']
THREADS = jdict.pop('threads', 2)
# Basename of the MS without its ".ms"/".MS" extension.
msbase = os.path.basename(msname)[:-3]
prefix = jdict.pop('prefix', None) or '{0}/{1}'.format(OUTPUT, msbase)
params = {}
# options for writing flags
writeflags = jdict.pop("write-flags-to-ms", None)
if writeflags:
    params["ms_sel.ms_write_flags"] = 1
    params["ms_sel.ms_fill_legacy_flags"] = 1 if jdict.pop(
        "fill-legacy-flags", False) else 0
    write_flagset = jdict.pop("write-flagset", None)
    if write_flagset:
        params["ms_wfl.write_bitflag"] = write_flagset
    params["ms_sel.ms_write_flag_policy"] = "'add to set'" if jdict.pop(
        "write-flag-policy", False) else "'replace set'"
# Read flags options
readflagsets = jdict.pop("read-flagsets", False)
if readflagsets:
    params["ms_rfl.read_flagsets"] = readflagsets
params['ms_sel.ms_read_flags'] = 1 if jdict.pop(
    "read-flags-from-ms", False) else 0
params["ms_rfl.read_legacy_flags"] = 1 if jdict.pop(
    "read-legacy-flags", False) else 0
# MS/data selection options.
params["ms_sel.msname"] = msname
field_id = jdict.pop("field-id", 0)
spw_id = jdict.pop("spw-id", 0)
params["ms_sel.tile_size"] = jdict.pop("tile-size", 16)
params["ms_sel.ddid_index"] = spw_id
params["ms_sel.field_index"] = field_id
# TDL profile/section driving the stefcal recipe.
TDL = jdict.pop("tdlconf", None) or CODE + "/tdlconf.profiles"
SECTION = jdict.pop("section", None) or "stefcal"
skymodel = jdict.pop("skymodel", None)
beam_files_pattern = jdict.pop("beam-files-pattern", False)
jones_type = jdict.pop("jones-implementation", "Gain2x2")
column = jdict.pop("column", "DATA")
outcol = jdict.pop("output-column", "CORRECTED_DATA")
params["ms_sel.input_column"] = column
params["ms_sel.output_column"] = outcol
params["tiggerlsm.filename"] = skymodel
params["tiggerlsm.lsm_subset"] = jdict.get("subset", "all")
params["do_output"] = jdict.pop("output-data", "CORR_RES")
saveconf = jdict.pop('save-config', None)
params['ms_sel.ms_corr_sel'] = "'{}'".format(jdict.pop('correlations', '2x2'))
label = jdict.pop("label", None)
model_column = jdict.pop("model-column", 'MODEL_DATA')
# Gain (G) jones term: solve-and-save unless apply-only is requested.
gjones = jdict.pop("Gjones", False)
if gjones:
    time_smooth, freq_smooth = jdict.get("Gjones-smoothing-intervals", (1, 1))
    time_int, freq_int = jdict.get("Gjones-solution-intervals", (1, 1))
    mode = 'apply' if jdict.get('Gjones-apply-only', False) else 'solve-save'
    gjones_gains = jdict.pop('Gjones-gain-table', None) or "{0}/{1}{2}.gain.cp".format(
        OUTPUT, msbase, "-%s" % label if label else "")
    # NOTE(review): "stefcal_gain.implementation" appears twice below;
    # the later Gjones-matrix-type entry wins -- confirm this is intended.
    params.update({
        "stefcal_gain.enabled": 1,
        "stefcal_gain.mode": mode,
        "stefcal_gain.reset": 0 if mode == "apply" else 1,
        "stefcal_gain.implementation": jones_type,
        "stefcal_gain.timeint": time_int,
        "stefcal_gain.freqint": freq_int,
        "stefcal_gain.flag_ampl": jdict.get("Gjones-ampl-clipping", 0),
        "stefcal_gain.flag_chisq": jdict.get("Gjones-chisq-clipping", 0),
        "stefcal_gain.flag_chisq_threshold": jdict.get("Gjones-thresh-sigma", 10),
        "stefcal_gain.flag_ampl_low": jdict.get("Gjones-ampl-clipping-low", 0.3),
        "stefcal_gain.flag_ampl_high": jdict.get("Gjones-ampl-clipping-high", 2),
        "stefcal_gain.implementation": jdict.get("Gjones-matrix-type", "Gain2x2"),
        "stefcal_gain.table": gjones_gains,
    })
# Bandpass (B) jones term.
bjones = jdict.pop("Bjones", False)
if bjones:
    time_smooth, freq_smooth = jdict.get("Bjones-smoothing-intervals", (1, 0))
    time_int, freq_int = jdict.get("Bjones-solution-intervals", (1, 0))
    mode = 'apply' if jdict.get('Bjones-apply-only', False) else 'solve-save'
    bjones_gains = jdict.pop('Bjones-gain-table', None) or "{0}/{1}{2}.gain1.cp".format(
        OUTPUT, msbase, "-%s" % label if label else "")
    params.update({
        "stefcal_gain1.enabled": 1,
        "stefcal_gain1.label": 'B',
        "stefcal_gain1.mode": mode,
        "stefcal_gain1.reset": 0 if mode == "apply" else 1,
        "stefcal_gain1.implementation": jones_type,
        "stefcal_gain1.timeint": time_int,
        "stefcal_gain1.freqint": freq_int,
        "stefcal_gain1.flag_ampl": jdict.get("Bjones-ampl-clipping", 0),
        "stefcal_gain1.flag_chisq": jdict.get("Bjones-chisq-clipping", 0),
        "stefcal_gain1.flag_chisq_threshold": jdict.get("Bjones-thresh-sigma", 10),
        "stefcal_gain1.flag_ampl_low": jdict.get("Bjones-ampl-clipping-low", 0.3),
        "stefcal_gain1.flag_ampl_high": jdict.get("Bjones-ampl-clipping-high", 2),
        "stefcal_gain1.implementation": jdict.get("Bjones-matrix-type", "Gain2x2"),
        "stefcal_gain1.table": bjones_gains,
    })
# Primary beam (E) term via FITS beam patterns.
beam = jdict.pop("Ejones", False)
if beam and beam_files_pattern:
    params.update({
        "me.e_enable": 1,
        "me.p_enable": 1,
        "me.e_module": "Siamese_OMS_pybeams_fits",
        "me.e_all_stations": 1,
        "pybeams_fits.sky_rotation": 1 if jdict.pop('parallactic-angle-rotation', False) else 0,
        "pybeams_fits.l_axis": jdict.pop("beam-l-axis", "L"),
        "pybeams_fits.m_axis": jdict.pop("beam-m-axis", "M"),
        "pybeams_fits.filename_pattern": "'{}'".format(beam_files_pattern),
    })
# Differential (direction-dependent, dE) gain term.
ddjones = jdict.pop("DDjones", False)
if ddjones:
    time_int, freq_int = jdict.pop("DDjones-solution-intervals", (1, 1))
    time_smooth, freq_smooth = jdict.pop("DDjones-smoothing-intervals", (1, 1))
    mode = 'apply' if jdict.get('DDjones-apply-only', False) else 'solve-save'
    ddjones_gains = jdict.pop('DDjones-gain-table', None) or "{0}/{1}{2}.diffgain.cp".format(
        OUTPUT, msbase, "-%s" % label if label else "")
    params.update({
        "stefcal_diffgain.enabled": 1,
        "stefcal_diffgain.reset": 0 if mode == "apply" else 1,
        "stefcal_diffgain.flag_ampl": 0,
        "stefcal_diffgain.flag_chisq": 1,
        "stefcal_diffgain.flag_chisq_threshold": 5,
        "stefcal_diffgain.freqint": freq_int,
        "stefcal_diffgain.freqsmooth": freq_smooth,
        "stefcal_diffgain.implementation": jones_type,
        "stefcal_diffgain.label": jdict.pop("DDjones-tag", "dE"),
        "stefcal_diffgain.max_diverge": 1,
        "stefcal_diffgain.mode": mode,
        "stefcal_diffgain.niter": 50,
        "stefcal_diffgain.omega": 0.5,
        "stefcal_diffgain.quota": 0.95,
        "stefcal_diffgain.table": ddjones_gains,
        "stefcal_diffgain.timeint": time_int,
        "stefcal_diffgain.timesmooth": time_smooth,
        "stefcal_diffgain.implementation": jdict.get("DDjones-matrix-type", "Gain2x2"),
    })
# Interferometer-based (IFR) gain term. NOTE: this previously popped the
# "DDjones" key, which the DD section above has already consumed, so
# ifrjones was always False and this section was dead code; it must key
# on "IFRjones" (matching the IFRjones-* option names used below).
ifrjones = jdict.pop("IFRjones", False)
if ifrjones:
    # Default gain table lives next to the other products in OUTPUT.
    ifrjones_gains = jdict.pop('IFRjones-gain-table', None) or "{0}/{1}{2}.ifrgain.cp".format(
        OUTPUT, msbase, "-%s" % label if label else "")
    mode = 'apply' if jdict.get('IFRjones-apply-only', False) else 'solve-save'
    params.update({
        "stefcal_ifr_gain_mode": mode,
        "stefcal_ifr_gains": 1,
        "stefcal_ifr_gain_reset": 0 if mode == "apply" else 1,
        "stefcal_reset_ifr_gains": 0 if mode == "apply" else 1,
        "stefcal_ifr_gain.table": ifrjones_gains,
    })
makeplots = jdict.pop("make-plots", False)
gjones_plotprefix = prefix+"-gjones_plots"
bjones_plotprefix = prefix+"-bjones_plots"
ddjones_plotprefix = prefix+"-ddjones_plots"
ifrjones_plotprefix = prefix+"-ifrjones_plots"
def run_meqtrees(msname):
    """Assemble and run the meqtree-pipeliner command for calico-stefcal,
    then (optionally) plot the resulting gain solutions.

    Parameters
    ----------
    msname : str
        Path to the measurement set; used here to read the FEED subtable
        when deciding between XY and RL feed labels for the plots.
    """
    # Pipeliner preamble: thread count, TDL config file and config section.
    prefix = ["--mt %d -c %s [%s]" % (THREADS, TDL, SECTION)]
    CATTERY_PATH = os.path.dirname(Cattery.__file__)
    # The stefcal job from the Calico tree is the TDL entry point.
    suffix = ["%s/Calico/calico-stefcal.py =stefcal" % CATTERY_PATH]
    options = {}
    options.update(params)
    if jdict.pop("add-vis-model", 0):
        # Read an extra visibility model from a column of the MS.
        options["read_ms_model"] = 1
        options["ms_sel.model_column"] = model_column
    taql = jdict.get('data-selection', None)
    if taql:
        options["ms_sel.ms_taql_str"] = taql
    args = []
    for key, value in options.items():
        # Quote values containing spaces so the shell keeps them as one token.
        # NOTE(review): find(' ') > 0 misses strings that *start* with a
        # space -- presumably never the case here, but worth confirming.
        if isinstance(value, str) and value.find(' ') > 0:
            value = '"{:s}"'.format(value)
        args.append('{0}={1}'.format(key, value))
    args = prefix + args + suffix
    _runc = " ".join([binary] + args + \
            ['-s {}'.format(saveconf) if saveconf else ''])
    if prun(_runc) !=0:
        sys.exit(1)
    log.info("MeqTrees Done!")
    # now plot the gains
    if makeplots:
        log.info("Preparing to make gain plots")
        import Owlcat.Gainplots as plotgains
        # Determine the feed basis (linear XY vs circular RL) from the MS.
        feed_tab = table(msname+"/FEED")
        log.info("Extracting feed type from MS")
        feed_type = set(feed_tab.getcol("POLARIZATION_TYPE")['array'])
        feed_type = "".join(feed_type)
        log.info("Feed type is [%s]" % feed_type)
        if feed_type.upper() in ["XY", "YX"]:
            feed_type = "XY"
        else:
            feed_type = "RL"
        if gjones:
            log.info("Making Gain plots (G)...")
            plotgains.make_gain_plots(
                gjones_gains, prefix=gjones_plotprefix, feed_type=feed_type)
        if bjones:
            log.info("Making Gain plots (B)...")
            plotgains.make_gain_plots(
                bjones_gains, gain_label='B', prefix=bjones_plotprefix, feed_type=feed_type)
        if ddjones:
            log.info("Making differential gain plots...")
            plotgains.make_diffgain_plots(
                ddjones_gains, prefix=ddjones_plotprefix, feed_type=feed_type)
        if ifrjones:
            log.info("Making IFR gain plots...")
            plotgains.make_ifrgain_plots(
                ifrjones_gains, prefix=ifrjones_plotprefix, feed_type=feed_type)
# Kick off the calibration run (msname is defined earlier in this recipe,
# outside this excerpt).
run_meqtrees(msname)
| 9,936 | 36.640152 | 97 | py |
Stimela | Stimela-master/stimela/cargo/cab/owlcat_plotelev/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
# If a list of fields is given, insert them as repeated arguments.
# Other arguments not allowed to be lists.
# Build the command line: "msname" may be a list of fields, which
# parse_parameters() expands into repeated positional arguments; no other
# parameter is allowed to be a list.
cmd = [config.binary]
cmd += parse_parameters(repeat=True,
                        positional=["msname"], mandatory=["msname"])
# Execute, propagating failure as a non-zero exit status.
if prun(cmd) != 0:
    sys.exit(1)
| 403 | 30.076923 | 86 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_exportfits/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
# Instantiate the configured CASA task with the cab parameters and execute it.
print(f"Running CASA task '{config.binary}'")
crasa.CasaTask(config.binary, **parameters_dict).run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_polfromgain/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
# 'save_result' is consumed here so it is not forwarded as a CASA task argument.
result_file = parameters_dict.pop("save_result", None)
crasa.CasaTask(config.binary, save_result=result_file, **parameters_dict).run()
| 307 | 27 | 80 | py |
Stimela | Stimela-master/stimela/cargo/cab/lwimager/src/run.py | import pyrap.images
import os
import sys
from pyrap.tables import table
from MSUtils import msutils
import tempfile
import astropy.io.fits as pyfits
import subprocess
import shlex
import shutil
import yaml
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ['CONFIG']
OUTPUT = os.environ['OUTPUT']
INPUT = os.environ['INPUT']  # BUGFIX: was misspelt 'INPPUT' (and thus unusable)
MSDIR = os.environ['MSDIR']
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
params = cab['parameters']
# Work inside a scratch directory so intermediate CASA images stay isolated;
# the script chdirs back to OUTPUT and removes this directory at the end.
tdir = tempfile.mkdtemp(dir='.')
os.chdir(tdir)
def rm_fr(item):
    # Recursively force-remove a file or directory (no error if it is absent).
    os.system("rm -fr %s" % item)
def _run(prefix=None, predict=False, **kw):
    """Build an lwimager command line from keyword arguments and execute it.

    When *predict* is True the keyword args are passed through verbatim
    (used for filling MODEL_DATA).  Otherwise output image names are derived
    from *prefix*, and the resulting CASA images are converted to FITS
    unless port2fits is disabled.

    BUGFIX: the dict iteration used Python-2-only .iteritems()/.itervalues();
    .items()/.values() behave identically here and also work on Python 3.
    """
    port2fits = kw.pop('port2fits', True)
    keep_casa_images = kw.pop('keep_casa_images', False)
    if predict:
        args = ['{0}={1}'.format(a, b) for a, b in kw.items()]
        _runc = " ".join([cab["binary"]] + args)
        subprocess.check_call(shlex.split(_runc))
        return
    if kw.get('niter', 0) > 0:
        # Deconvolution requested: make sure a valid clean operation is set.
        if kw.get('operation', None) not in ['clark', 'hogbom', 'csclean', 'multiscale', 'entropy']:
            kw['operation'] = 'csclean'
        images = {
            "restored": ['{0}.restored.{1}'.format(prefix, a) for a in ['fits', 'img']],
            "model": ['{0}.model.{1}'.format(prefix, a) for a in ['fits', 'img']],
            "residual": ['{0}.residual.{1}'.format(prefix, a) for a in ['fits', 'img']],
        }
    elif kw.get('niter', 0) == 0:
        # No cleaning: only a dirty image is produced.
        kw["operation"] = 'image'
        images = {
            "image": ['{0}.dirty.{1}'.format(prefix, a) for a in ['fits', 'img']],
        }
    # Tell lwimager to write the CASA (.img) variant of each product.
    for key, value in images.items():
        kw[key] = value[1]
    args = ['{0}={1}'.format(a, b) for a, b in kw.items()]
    _runc = " ".join([cab["binary"]] + args)
    subprocess.check_call(shlex.split(_runc))
    if port2fits:
        print('Converting CASA iamges to FITS images')
        for fits, img in images.values():
            im = pyrap.images.image(img)
            im.tofits(fits, overwrite=True, velocity=kw.get(
                'prefervelocity', False))
            if not keep_casa_images:
                rm_fr(img)
def predict_vis(msname, image, column="MODEL_DATA",
                chanchunk=None, chanstart=0, chanstep=1):
    """Converts image into predicted visibilities.

    The FITS *image* is converted into a CASA image, and lwimager's
    fill-model mode is used to predict it into the MS MODEL_DATA column,
    chunking image channels so each chunk fits in memory.  If *column* is
    not MODEL_DATA, the predicted data is finally copied there.

    NOTE(review): the chunk arithmetic below (chanchunk, mschanstep, msnch)
    relies on Python-2 integer division; under Python 3 these expressions
    produce floats, which would break range() below -- confirm the target
    interpreter before reuse.
    """
    # CASA to convert them
    casaimage = '{0}/{1}.img'.format(OUTPUT, os.path.basename(image))
    # convert to CASA image
    img = pyrap.images.image(image)
    img.saveas(casaimage)
    imgshp = img.shape()
    ftab = table(msname+'/SPECTRAL_WINDOW')
    numchans = ftab.getcol('NUM_CHAN')[0]
    # default chunk list is entire chanel range. Update this if needed
    chunklist = [(0, numchans, None, None)]
    if len(imgshp) == 4 and imgshp[0] > 1:
        # Image is a frequency cube: work out a memory-safe channel chunking.
        nimgchan = imgshp[0]
        print("image cube has {0} channels, MS has {1} channels".format(
            nimgchan, numchans))
        imgchansize = imgshp[1]*imgshp[2]*imgshp[3] * \
            4  # size of an image channel in bytes
        if chanchunk is None:
            # Budget ~1/20th of total physical RAM for one chunk.
            mem_bytes = os.sysconf('SC_PAGE_SIZE') * \
                os.sysconf('SC_PHYS_PAGES')  # e.g. 4015976448
            chanchunk = max((mem_bytes/20)/imgchansize, 1)
            print("based on available memory ({0}), max image chunk is {1} channels".format(
                mem_bytes, chanchunk))
        if chanchunk < nimgchan:
            # Map image-channel chunks onto MS channel ranges.
            mschanstep = numchans*chanstep/nimgchan
            if numchans % nimgchan:
                print(
                    "MS channels not evenly divisible into $nimgchan image channels, chunking may be incorrect")
            chunklist = []
            for chan0 in range(0, nimgchan, chanchunk):
                imch0, imch1 = chan0, (min(chan0+chanchunk, nimgchan)-1)
                msch0 = chanstart + imch0*mschanstep
                msnch = (imch1-imch0+1)*mschanstep/chanstep
                # overlap each chunk from 1 onwards by a half-chunk back to take care of extrapolated visibilties
                # from previous channel
                if imch0:
                    imch0 -= 1
                    msch0 -= mschanstep/2
                    msnch += mschanstep/2
                print("image chunk {0}~{1} corresponds to MS chunk {2}~{3}".format(
                    imch0, imch1, msch0, msch0+msnch-1))
                chunklist.append((msch0, msnch, imch0, imch1))
    # even in fill-model mode where it claims to ignore image parameters, the image channelization
    # arguments need to be "just so" as per below, otherwise it gives a GridFT: weights all zero message
    kw0 = {}
    kw0.update(ms=msname, model=casaimage,
               niter=0, fixed=1, mode="channel", operation="csclean",
               img_nchan=1, img_chanstart=chanstart, img_chanstep=numchans*chanstep)
    kw0['fillmodel'] = 1
    # Full-image bounding box (per-chunk boxes override blc[0]/trc[0] below).
    blc = [0]*len(imgshp)
    trc = [x-1 for x in imgshp]
    # now loop over image frequency chunks
    for ichunk, (mschanstart, msnumchans, imgch0, imgch1) in enumerate(chunklist):
        if len(chunklist) > 1:
            # Slice out this frequency chunk into its own CASA image.
            blc[0], trc[0] = imgch0, imgch1
            print("writing CASA image for slice {0} {1}".format(blc, trc))
            casaimage1 = "{0}.{1}.img".format(image, ichunk)
            rm_fr(casaimage1)
            print("writing CASA image for slice {0} {1} to {2}".format(
                blc, trc, casaimage1))
            img.subimage(blc, trc, dropdegenerate=False).saveas(casaimage1)
            kw0.update(model=casaimage1)
        else:
            # Single chunk: release the table lock so lwimager can open it.
            img.unlock()
        # setup imager options
        kw0.update(chanstart=mschanstart, chanstep=chanstep, nchan=msnumchans)
        print("predicting visibilities into MODEL_DATA")
        _run(predict=True, **kw0)
        if len(chunklist) > 1:
            rm_fr(casaimage1)
    rm_fr(casaimage)
    if column != "MODEL_DATA":
        print('Data was predicted to MODEL_DATA column. Will now copy it to the {} column as requested'.format(column))
        msutils.copycol(msname=msname, fromcol="MODEL_DATA", tocol=column)
# Translate cab parameters into lwimager options, normalising quantities
# that need a unit suffix.
options = {}
for param in params:
    value = param['value']
    name = param['name']
    if name == 'prefix':
        # The output prefix is used locally, not passed to lwimager.
        prefix = value
        continue
    if value is None:
        continue
    if name == 'cellsize':
        # Bare numbers are interpreted as arcseconds.
        if isinstance(value, (float, int)):
            value = '{}arcsec'.format(value)
    elif name in ['threshold', 'targetflux']:
        # BUGFIX: these are flux quantities, but the original appended
        # 'arcsec' (copy-paste from the cellsize branch).  lwimager expects
        # a flux unit here, consistent with the '{}Jy' used for the
        # noise-derived threshold further down.
        if isinstance(value, float):
            value = '{}Jy'.format(value)
    options[name] = value
# Optional: derive the clean threshold from the RMS of a supplied noise image.
noise_image = options.pop('noise_image', False)
if noise_image:
    noise_sigma = options.pop('noise_sigma')
    noise_hdu = pyfits.open(noise_image)
    noise_data = noise_hdu[0].data
    noise_std = noise_data.std()
    threshold = noise_sigma*noise_std
    options['threshold'] = '{}Jy'.format(threshold)
else:
    # BUGFIX: use a default so a missing 'noise_sigma' parameter does not
    # raise KeyError; it is only meaningful together with noise_image.
    options.pop('noise_sigma', None)
# Either simulate a FITS model into the MS, or run the imager proper.
predict = options.pop('simulate_fits', False)
if predict:
    # Scratch FITS file that predict_from_fits.py will write its
    # restructured model into.
    tfile = tempfile.NamedTemporaryFile(suffix='.fits')
    tfile.flush()
    cell = options.get('cellsize', None)
    if cell is None:
        # Fall back to the pixel scale recorded in the FITS header.
        with pyfits.open(predict) as _hdu:
            if hasattr(_hdu, '__iter__'):
                hdu = _hdu[0]
            else:
                hdu = _hdu
            cdelt = hdu.header.get('CDELT1', None)
            if cdelt:
                cell = '{:f}arcsec'.format(abs(cdelt)*3600)
    if cell is None:
        raise RuntimeError('The size of a pixel in this FITS image was not specified \
 in FITS header (CDELT1/2), or as parameter for this module ("cellsize"). Cannot proceed')
    # First restructure the FITS model, then predict it into the MS.
    _runc = " ".join(['python /scratch/code/predict_from_fits.py'] + [predict, options['ms'], cell,
                                                                      tfile.name])
    subprocess.check_call(shlex.split(_runc))
    predict_vis(msname=options['ms'], image=tfile.name, column=options.get('data', 'MODEL_DATA'),
                chanchunk=options.get('chanchunk', None), chanstart=options.get('img_chanstart', 0),
                chanstep=options.get('img_chanstep', 1))
    tfile.close()
else:
    _run(prefix, **options)
# Leave the scratch directory and clean it up.
os.chdir(OUTPUT)
os.system('rm -r {}'.format(tdir))
| 8,053 | 34.324561 | 119 | py |
Stimela | Stimela-master/stimela/cargo/cab/lwimager/src/predict_from_fits.py | import pyfits as fitsio
import numpy
from pyrap.tables import table
import numpy
import os
import sys
import tempfile
import subprocess
import shlex
# Command-line interface: <input FITS> <MS> <cellsize> <output FITS>.
imagename = sys.argv[1]
msname = sys.argv[2]
cell = sys.argv[3]
hdulist = fitsio.open(imagename)
# pyfits may return either an HDUList or a bare HDU depending on version.
if isinstance(hdulist, list):
    hdu = hdulist[0]
else:
    hdu = hdulist
tfile = tempfile.NamedTemporaryFile(suffix='.fits')
tfile.flush()
# Number of channels along the first data axis.
# NOTE(review): assumes axis 0 of the cube is frequency -- the axis
# relabelling below suggests that is the expected layout; confirm.
nchan = hdu.data.shape[0]
sys.stdout.write('Creating template image\n')
os.system('rm -fr {0}'.format(tfile.name))
# Make a small PSF image with lwimager purely to obtain a FITS header
# matching the MS channelization.
_runc = " ".join(['lwimager'] + ['ms='+msname, 'fits='+tfile.name, 'data=psf', 'npix=64',
                                 'mode=mfs', 'nchan={:d}'.format(
                                     nchan), 'img_nchan={:d}'.format(nchan),
                                 'prefervelocity=False', 'cellsize={0}'.format(cell)])
subprocess.check_call(shlex.split(_runc))
sys.stdout.write(
    'Created template image. Will now proceed to simulate FITS model into MS\n')
# Keep only the template's header; the data comes from the input image.
with fitsio.open(tfile.name) as hdu0:
    header = hdu0[0].header
tfile.close()
# Re-centre the template header on the input image and massage the data
# cube into the 4-axis (FREQ, STOKES, DEC, RA) layout lwimager expects.
naxis = hdu.header['NAXIS']
raNpix = hdu.header['NAXIS1']
decNpix = hdu.header['NAXIS2']
header['CRPIX1'] = raNpix/2.0
header['CRPIX2'] = decNpix/2.0
if naxis < 2:
    raise RuntimeError('FITS image has to have at least two axes')
elif naxis == 2:
    # Plain 2-D image: prepend degenerate STOKES and FREQ axes.
    hdu.data = hdu.data[numpy.newaxis, numpy.newaxis, ...]
    sys.stdout.write(
        'WARNING::FITS image has 2 axes. Will add STOKES and FREQ axes\n')
elif naxis == 3:
    sys.stdout.write('WARNING::FITS image has 3 axes.\n')
    # Try to find out what is the 3rd axis
    third_axis = hdu.header.get('CTYPE3', None)
    if third_axis is None:
        sys.stdout.write(
            'WARNING:: Third axis is not labelled, assuming its the FREQ Axis\n')
        hdu.data = hdu.data[:, numpy.newaxis, ...]
    elif third_axis.lower() == 'freq':
        # Insert a degenerate STOKES axis after FREQ.
        hdu.data = hdu.data[:, numpy.newaxis, ...]
    elif third_axis.lower() == 'stokes':
        # Insert a degenerate FREQ axis in front.
        hdu.data = hdu.data[numpy.newaxis, ...]
    else:
        sys.stdout.write(
            'WARNING:: CTYPE3 value [{}] is unknown, will ignore it and assume 3rd axis is FREQ\n'.format(third_axis))
        hdu.data = hdu.data[:, numpy.newaxis, ...]
elif naxis == 4:
    sys.stdout.write(
        'FITS image has four axes. If the axes are labeled (via CTYPE<axis index>), will attempt to restructure it so it is fit to predict from\n')
    third_axis = hdu.header.get('CTYPE3', None)
    if third_axis is None:
        raise RuntimeError(
            'ABORT:: FITS axes are not labelled. Cannot proceed. Please label STOKES and FREQ accordingly\n')
    elif third_axis.lower() == 'freq':
        # Swap FREQ to the front (axis order in the numpy array is reversed
        # relative to the FITS axis numbering).
        hdu.data = numpy.rollaxis(hdu.data, 1)
else:
    sys.stdout.write(
        'WARNING:: FITS image has more than 4 axes. Passing the buck to lwimger\n')
hdu.header = header
# NOTE(review): 'clobber' is the legacy pyfits spelling (astropy renamed it
# to 'overwrite'); fine for pyfits, would warn/fail on modern astropy.
hdulist.writeto(sys.argv[4], clobber=True)
| 2,835 | 30.511111 | 147 | py |
Stimela | Stimela-master/stimela/cargo/cab/mProjectCube/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
# If a list of fields is given, insert them as repeated arguments.
# Other arguments not allowed to be lists.
# mProjectCube allows repeated arguments for list-valued parameters;
# everything else must be a scalar.
command = [config.binary]
command.extend(parse_parameters(repeat=True))
# Execute and surface failure to the caller.
status = prun(command)
if status != 0:
    raise SystemError(f" {config.binary} exited with a non-zero code. See logs for details")
| 394 | 29.384615 | 92 | py |
Stimela | Stimela-master/stimela/cargo/cab/simulator/src/run.py | import numpy
import os
import sys
from pyrap.tables import table
import yaml
import math
import Cattery
from scabha import config, parameters_dict, prun
CODE = os.path.join(os.environ["STIMELA_MOUNT"], "code")
CONFIG = os.environ["CONFIG"]
def compute_vis_noise(msname, sefd, spw_id=0):
    """Computes nominal per-visibility noise"""
    maintab = table(msname)
    spwtab = table(msname + "/SPECTRAL_WINDOW")
    # First-channel frequency, channel width, integration time and total
    # synthesis span for the selected spectral window.
    freq0 = spwtab.getcol("CHAN_FREQ")[spw_id, 0]
    wavelength = 300e+6/freq0
    bw = spwtab.getcol("CHAN_WIDTH")[spw_id, 0]
    dt = maintab.getcol("EXPOSURE", 0, 1)[0]
    last_row = maintab.nrows() - 1
    dtf = (maintab.getcol("TIME", last_row, 1)-maintab.getcol("TIME", 0, 1))[0]
    # close tables properly, else the calls below will hang waiting for a lock...
    maintab.close()
    spwtab.close()
    print(">>> %s freq %.2f MHz (lambda=%.2fm), bandwidth %.2g kHz, %.2fs integrations, %.2fh synthesis" % (
        msname, freq0*1e-6, wavelength, bw*1e-3, dt, dtf/3600))
    noise = sefd/math.sqrt(abs(2*bw*dt))
    print(">>> SEFD of %.2f Jy gives per-visibility noise of %.2f mJy" %
          (sefd, noise*1000))
    return noise
# Load the cab definition and translate cab parameters into turbo-sim
# TDL options.
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
_params = parameters_dict
params = {}
options = {}
# Drop unset parameters.
for name, value in _params.items():
    if value is None:
        continue
    params[name] = value
options["ms_sel.ddid_index"] = params.get('spw-id', 0)
options["ms_sel.field_index"] = params.get('field-id', 0)
tdlconf = params.get("tdlconf", None) or CODE+"/tdlconf.profiles"
section = params.get("section", None) or 'sim'
mode = params.pop("mode", None) or "simulate"
threads = params.pop("threads", 4)
skymodel = params.pop("skymodel", None)
beam_files_pattern = params.pop("beam-files-pattern", False)
if isinstance(skymodel, str):
    if os.path.exists(skymodel):
        # Use a Tigger LSM as the sky model.
        options['me.sky.tiggerskymodel'] = 1
        options["tiggerlsm.filename"] = skymodel
    else:
        raise RuntimeError("ABORT: Could not find the skymodel")
# Map this cab's mode names onto turbo-sim's sim_mode strings.
modes = {
    "simulate": 'sim only',
    "add": "add to MS",
    "subtract": 'subtract from MS',
}
column = params.pop("column", "CORRECTED_DATA")
incol = params.pop("input-column", "CORRECTED_DATA")
msname = params['msname']
options["ms_sel.msname"] = msname
options["sim_mode"] = modes[mode]
options["ms_sel.input_column"] = incol
options["ms_sel.output_column"] = column
options["me.use_smearing"] = 1 if params.pop('smearing', False) else 0
saveconf = params.pop('save-config', None)
addnoise = params.pop("addnoise", False)
options["ms_sel.tile_size"] = params.pop("tile-size", 16)
if addnoise:
    # Explicit noise level wins; otherwise derive it from the SEFD.
    noise = params.pop("noise", 0) or compute_vis_noise(
        msname, params.pop("sefd", 551))
    options["noise_stddev"] = noise
gjones = params.pop("Gjones", False)
if gjones:
    # Sinusoidal gain-amplitude error model.
    gain_opts = {
        "me.g_enable": 1,
        "oms_gain_models.err-gain.error_model": "SineError",
        "oms_gain_models.err-gain.max_period": params.pop("gain-max-period", 2),
        "oms_gain_models.err-gain.maxval": params.pop("gain-max-error", 1.2),
        "oms_gain_models.err-gain.min_period": params.pop("gain-min-period", 1),
        "oms_gain_models.err-gain.minval": params.pop("gain-min-error", 0.8),
    }
    options.update(gain_opts)
    # Sinusoidal gain-phase error model.
    # NOTE(review): phase uses 'maxerr' but 'minval' -- asymmetric key names;
    # confirm against the Siamese/OMS gain-model TDL option names.
    phase_opts = {
        "oms_gain_models.err-phase.error_model": "SineError",
        "oms_gain_models.err-phase.max_period": params.pop("phase-max-period", 2),
        "oms_gain_models.err-phase.maxerr": params.pop("phase-max-error", 30.0),
        "oms_gain_models.err-phase.min_period": params.pop("phase-min-period", 1),
        "oms_gain_models.err-phase.minval": params.pop("phase-min-error", 5),
    }
    options.update(phase_opts)
# Primary-beam (E-Jones) simulation from FITS beam patterns.
beam = params.pop("Ejones", False)
if beam and beam_files_pattern:
    beam_opts = {
        "me.e_enable": 1,
        "me.p_enable": 1,
        "me.e_module": "Siamese_OMS_pybeams_fits",
        "pybeams_fits.sky_rotation": 1 if params.pop('parallactic-angle-rotation', False) else 0,
        "me.e_all_stations": 1,
        "pybeams_fits.l_axis": params.pop("beam-l-axis", "L"),
        "pybeams_fits.m_axis": params.pop("beam-m-axis", "M"),
        "pybeams_fits.filename_pattern": "'{}'".format(beam_files_pattern),
    }
    options.update(beam_opts)
rms_perr = params.get("pointing-accuracy", 0)
# Include pointing errors if needed
if rms_perr:
    # Draw independent Gaussian l/m pointing offsets per antenna.
    anttab = table(msname + "/" + "ANTENNA")
    NANT = anttab.nrows()
    options["me.epe_enable"] = 1
    perr = numpy.random.randn(
        NANT)*rms_perr, numpy.random.randn(NANT)*rms_perr
    ll, mm = " ".join(map(str, perr[0])), " ".join(map(str, perr[-1]))
    options['oms_pointing_errors.pe_l.values_str'] = "'%s'" % ll
    options['oms_pointing_errors.pe_m.values_str'] = "'%s'" % mm
# Optionally recentre the sky model on a new field centre ("ms" means
# use the MS phase centre).
field_center = params.pop("field-center", None)
if field_center and skymodel:
    if field_center.lower() == "ms":
        ftab = table(msname+"/FIELD")
        ra, dec = ftab.getcol("PHASE_DIR")[params.get('field-id', 0)][0]
        field_center = "J2000,%frad,%frad" % (ra, dec)
    tmp = "recentered_"+os.path.basename(skymodel)
    # NOTE(review): the tigger-convert exit status is not checked here.
    prun(["tigger-convert", "--recenter", field_center, skymodel, tmp, "-f"])
    options["tiggerlsm.filename"] = tmp
# Assemble the meqtree-pipeliner command: preamble, key=value options,
# then the turbo-sim TDL job.
prefix = ['-s {}'.format(saveconf) if saveconf else ''] + \
    ["--mt {0} -c {1} [{2}]".format(threads, tdlconf, section)]
CATTERY_PATH = os.path.dirname(Cattery.__file__)
suffix = ["%s/Siamese/turbo-sim.py =_simulate_MS" % CATTERY_PATH]
args = []
for key, value in options.items():
    # Quote values containing spaces so the shell keeps them as one token.
    if isinstance(value, str) and value.find(' ') > 0:
        value = '"{:s}"'.format(value)
    args.append('{0}={1}'.format(key, value))
_runc = " ".join([config.binary] + prefix + args + suffix)
exitcode = prun(_runc)
if exitcode != 0:
    raise RuntimeError(f"Meqtrees failed with exit code {exitcode}. See logs for more details")
Stimela | Stimela-master/stimela/cargo/cab/cubical/src/run.py | # -*- coding: future_fstrings -*-
import os
import sys
import shlex
import configparser
import ast
from scabha import config, parameters_dict, prun
# Translate cab parameters into CubiCal option values; booleans become
# 0/1, lists become comma-joined strings, and the parset (if any) is
# kept aside to be passed as a positional argument.
args = {}
parset = []
for key, val in parameters_dict.items():
    if val is None:
        continue
    if val is False:
        val = 0
    elif val is True:
        val = 1
    elif key == 'parset':
        parset = [val]
        continue
    elif isinstance(val, list):
        val = ",".join(str(item) for item in val)
    args[key] = val
# available jones terms
joneses = "g b dd".split()

# Determine which Jones terms are being solved for: prefer an explicit
# 'sol-jones' cab parameter, otherwise fall back to the parset's [sol]
# section, and finally to the g+de default.
try:
    soljones = args.pop("sol-jones")
except KeyError:
    # BUGFIX: SafeConfigParser was a long-deprecated alias removed in
    # Python 3.12; ConfigParser is the drop-in replacement with
    # identical behaviour here.
    conf = configparser.ConfigParser(inline_comment_prefixes="#")
    conf.read(parset[0])
    if 'jones' in conf.options('sol'):
        soljones = conf.get('sol', 'jones')
        # The value may be a Python-style list literal or a comma list.
        if "[" in soljones:
            soljones = ast.literal_eval(soljones)
        else:
            soljones = soljones.split(",")
    else:
        soljones = ['g', 'de']
if type(soljones) is list:
    soljones = ",".join(soljones)

# Drop options belonging to Jones terms that are not being solved for.
for jones in joneses:
    if jones.lower() not in soljones.lower():
        jopts = filter(lambda a: a.startswith(
            "{0:s}-".format(jones)), args.keys())
        for item in list(jopts):
            del args[item]

opts = ["{0:s}sol-jones {1:s}".format(config.prefix, soljones)] + \
    ['{0}{1} {2}'.format(config.prefix, name, value)
     for name, value in args.items()]
_runc = " ".join([config.binary] + parset + opts)
argslist = shlex.split(_runc)
# run the command
if prun(argslist) != 0:
    sys.exit(1)
| 1,571 | 23.5625 | 69 | py |
Stimela | Stimela-master/stimela/cargo/cab/msutils/src/run.py | import sys
import os
from MSUtils import msutils
import MSUtils.ClassESW as esw
import inspect
from MSUtils.imp_plotter import gain_plotter
import glob
import shutil
import yaml
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Collect parameters; the 'command' parameter names the MSUtils function
# to dispatch to.
# NOTE(review): if no 'command' parameter is supplied, 'function' is never
# bound and the code below raises NameError -- presumably the cab schema
# makes it mandatory; confirm.
args = {}
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if name == "command":
        function = value
        continue
    args[name] = value
if function == 'sumcols':
    # sumcols expects its output column under 'outcol'.
    args['outcol'] = args.pop('colname')
if function == "estimate_weights":
    # Handled here directly (not via msutils) using the MSNoise helper.
    msnoise = esw.MSNoise(args['msname'])
    if isinstance(args['stats_data'], str) and args['stats_data'].find('use_package_meerkat_spec') >= 0:
        args['stats_data'] = esw.MEERKAT_SEFD
    # Calculate noise/weights from spec
    noise, weights = msnoise.estimate_weights(stats_data=args['stats_data'],
                                              smooth=args['smooth'],
                                              fit_order=args['fit_order'],
                                              plot_stats=args.get('plot_stats', None))
    if args['write_to_ms']:
        msnoise.write_toms(noise, columns=args['noise_columns'])
        msnoise.write_toms(weights, columns=args['weight_columns'])
    sys.exit(0)
if function == "plot_gains":
    # Handled here directly using the gain plotter.
    tab = args['ctable']
    tabtype = args['tabtype']
    dpi = args['plot_dpi']
    scale = args['subplot_scale']
    outfile = args['plot_file']
    gain_plotter(tab, tabtype, outfile, scale, dpi)
    sys.exit(0)
# Dispatch to the named MSUtils function with only the parameters it accepts.
run_func = getattr(msutils, function, None)
if run_func is None:
    raise RuntimeError("Function '{}' is not part of MSUtils".format(function))
# Filter default parameters that are part of this function.
# BUGFIX: inspect.getargspec() was removed in Python 3.11;
# getfullargspec() exposes the positional argument names in the same slot.
func_args = inspect.getfullargspec(run_func)[0]
_args = {}
for arg in args.keys():
    if arg in func_args:
        _args[arg] = args[arg]
try:
    run_func(**_args)
finally:
    # Always clean up junk files/directories the cab may have produced.
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 2,371 | 27.578313 | 104 | py |
Stimela | Stimela-master/stimela/cargo/cab/mosaicsteward/src/run.py | import os
import sys
import subprocess
import shlex
import glob
import shutil
import yaml
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Translate cab parameters into command-line options; the target image
# list is kept aside and handled specially below.
args = []
targets = None
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    elif value is True:
        value = ''
    elif name == 'target-images':
        targets = value
        continue
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
# BUGFIX: validate *before* use. The original built target_images first and
# then checked "if not target_images", but that string is always non-empty,
# so the check could never fire -- a missing parameter instead crashed
# earlier with a TypeError on targets[0].
if not targets:
    raise RuntimeError('Filenames of the images to be mosaicked have not been specified.')
indir = os.path.dirname(targets[0])
target_names = map(os.path.basename, targets)
# Repeat the --target-images flag once per image, as MosaicSteward expects.
target_images = "--target-images " + " --target-images ".join(target_names)
args += ["--input {0:s} {1:s} --output {2:s}".format(indir, target_images,
                                                     OUTPUT)]
# Run the binary; always clean up junk products afterwards.
_runc = " ".join([cab["binary"]] + args)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,532 | 24.983051 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/politsiyakat_cal_phase/src/run.py | import sys
import os
import json
import yaml
import subprocess
import shlex
import shutil
import glob
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Collect the set (non-None) parameters into a dict passed as JSON.
args = {}
tasksuite = None
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    args[name] = value
# Wrap the JSON blob in single quotes; shlex.split below strips them so
# the whole blob survives as one argv token.
kwargs = "'{}'".format(json.dumps(args))
ARGS = ["flag_phase_drifts",
        "-s antenna_mod",
        kwargs]
_runc = " ".join([cab['binary']] + ARGS)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always clean up junk products.
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,050 | 20.44898 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/flagms/src/run.py | import os
import sys
import shlex
import shutil
import subprocess
import yaml
import glob
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Translate cab parameters into flag-ms command-line options.
args = []
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value in [False, None]:
        continue
    # List-valued selections are joined: ranges with ':' (channels,
    # timeslots), everything else with ','.
    if name in 'channels timeslots corrs stations ddid field str'.split():
        if name in 'channels timeslots'.split():
            value = ':'.join(value)
        else:
            value = ','.join(value)
    # Pure flags carry no value.
    if value is True:
        value = ""
    # The MS name(s) go last as positional arguments.
    if name == 'msname':
        msname = value
        if isinstance(msname, str):
            msname = [msname]
        continue
    # 'flagged-any' may be repeated, once per value.
    if name == 'flagged-any':
        args += ['{0}flagged-any {1}'.format(cab['prefix'], a) for a in value]
        continue
    args.append('{0}{1} {2}'.format(cab['prefix'], name, value))
_runc = " ".join([cab["binary"]] + args + msname)
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always clean up junk products.
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
| 1,479 | 23.666667 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/ragavi_vis/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parameters, prun
# Flatten cab parameters into "<prefix><name> value [value ...]" tokens.
cmd = [config.binary]
for key, val in parameters.items():
    # Skip unset / blank / disabled parameters.
    if val in [None, "", " ", False]:
        continue
    cmd.append(f'{config.prefix}{key}')
    vals = val if isinstance(val, list) else [val]
    cmd.extend(str(v) for v in vals)
# run the command
if prun(cmd) != 0:
    sys.exit(1)
| 462 | 19.130435 | 56 | py |
Stimela | Stimela-master/stimela/cargo/cab/crystalball/src/run.py | import os
import sys
import glob
import shutil
import shlex
import subprocess
import yaml
# Standard stimela I/O locations, passed in via the container environment.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)
junk = cab["junk"]
# Translate cab parameters into crystalball options; the MS is kept aside
# and appended as the positional argument.
# NOTE(review): if the 'ms' parameter is unset, ms stays None and the join
# below raises TypeError -- presumably the schema makes it mandatory.
args = []
ms = None
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    elif value is False:
        continue
    elif value is True:
        # Pure flags carry no value.
        value = ''
    if name == "ms":
        ms = value
        continue
    args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join( [cab["binary"]] + args + [ms] )
try:
    subprocess.check_call(shlex.split(_runc))
finally:
    # Always clean up junk products.
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
                # Leave other types
| 1,144 | 21.45098 | 91 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.