keyword | repo_name | file_path | file_extension | file_size | line_count | content | language
|---|---|---|---|---|---|---|---|
3D | Aswendt-Lab/AIDAmri | ARA/AllenBrainAPI-master/getAllenStructureList.m | .m | 10,526 | 284 | function [ARA_table,tableRowInds] = getAllenStructureList(varargin)
% Download the list of adult mouse structures from the Allen API.
%
% function ARA_table = getAllenStructureList('param1',val1,...)
%
%
% Purpose
% Make an API query to read in the Allen Reference Atlas (ARA) brain area
% list. All areas and data are read. Data are cached in the system's temp
% directory and re-read from here if possible to improve speed.
%
%
% Inputs (all optional param/val pair)
% 'downloadAgain' - [false] If true, the function wipes cached data and
%                   re-reads. False by default.
% 'ancestorsOf' - [empty] Returns only those areas that are ancestors of
%                 the named area. You may supply a string, numeric scalar,
%                 or a cell array that combines these to return a table that
%                 contains the ancestors of multiple areas. If the ID or
%                 structure name can not be found, the function returns an
%                 empty array and displays a warning on-screen.
% 'childrenOf' - [empty] Returns only those areas that are children of
%                the named area. As above, you may supply a string, numeric
%                scalar, or a cell array that combines these.
% 'excludeReferenceArea' - [false] if true, the areas supplied by the childrenOf
%                          and ancestorsOf arguments are removed from the
% outputs.
%
%
% Outputs
% ARA_table - a table containing the imported data.
% tableRowInds - The rows in the original table that are present in ARA_table.
%                If the user has selected a subset of data using the childrenOf
%                or ancestorsOf arguments, then tableRowInds satisfies:
%                ARA_table = ORIGINAL_TABLE(tableRowInds,:);
%
%
% Examples
%
% a) Basic usage
% S=getAllenStructureList;
%
% b) Returning subsets of the data
% S=getAllenStructureList('ancestorsOf',{'Posterior auditory area, layer 1',1017})
% S=getAllenStructureList('ancestorsOf','Posterior auditory area, layer 1')
% S=getAllenStructureList('childrenOf','Cerebellum')
% S=getAllenStructureList('childrenOf','Cerebellum','ancestorsOf','Posterior auditory area, layer 1')
%
% c) Remove the cerebellum
% S=getAllenStructureList;
% [~,ind]=getAllenStructureList('childrenOf','Cerebellum');
% S(ind,:)=[];
%
% d) Remove the cerebellum's children but keep the cerebellum
% S=getAllenStructureList;
% [~,ind]=getAllenStructureList('childrenOf','Cerebellum', 'excludeReferenceArea',true);
% S(ind,:)=[];
%
%
%
% Rob Campbell - Basel 2015
if nargin==1
    %Alert anyone who might be using the old scheme
    fprintf('\n\n')
    help(mfilename)
    error('You supplied only one input argument')
end
% Parse input arguments
params = inputParser;
params.CaseSensitive = false;
params.addParamValue('downloadAgain', false, @(x) islogical(x) || x==0 || x==1);
params.addParamValue('ancestorsOf', {}, @(x) ischar(x) || isnumeric(x) || iscell(x))
params.addParamValue('childrenOf', {}, @(x) ischar(x) || isnumeric(x) || iscell(x))
params.addParamValue('excludeReferenceArea', false, @(x) islogical(x) || x==0 || x==1);
params.parse(varargin{:})
downloadAgain = params.Results.downloadAgain;
%Ensure that ancestorsOf and childrenOf are cell arrays of IDs or names in order to simplify later code
ancestorsOf = checkFilteringInput(params.Results.ancestorsOf);
childrenOf = checkFilteringInput(params.Results.childrenOf);
excludeReferenceArea = params.Results.excludeReferenceArea;
%Cached files will be stored here
cachedCSV = fullfile(tempdir,sprintf('%s_CACHED.csv',mfilename));
cachedMAT = fullfile(tempdir,sprintf('%s_CACHED.mat',mfilename));
if ~exist(cachedMAT,'file') || downloadAgain
% The data are to be re-read or we couldn't find any cached data
% The adult mouse structure graph has an id of 1.
url='http://api.brain-map.org/api/v2/data/Structure/query.csv?criteria=[graph_id$eq1]&num_rows=all';
[~,status] = urlwrite(url,cachedCSV);
if ~status
error('Failed to get CSV file from URL %s', url)
end
    fid = fopen(cachedCSV);
    if fid<0
        error('Failed to open CSV file at %s\n', cachedCSV)
    end
    fclose(fid);
    %Import the cached CSV as a table (the first row supplies the column names)
    readParams={'%d%d%q%q%d%d%d%d%d%d%d%d%s%s%s%s%s%d%d%d%s\n','delimiter',','};
    ARA_table=readtable(cachedCSV,'format',readParams{:});
%cache to disk in temporary location
save(cachedMAT,'ARA_table')
else
%If the data have been imported before we can just return them
load(cachedMAT)
end
%Filter the structure list if needed
if isempty(ancestorsOf) && ~isempty(childrenOf)
    [ARA_table,tableRowInds] = returnChildrenOnly(ARA_table,childrenOf,excludeReferenceArea);
elseif ~isempty(ancestorsOf) && isempty(childrenOf)
    [ARA_table,tableRowInds] = returnAncestorsOnly(ARA_table,ancestorsOf,excludeReferenceArea);
elseif ~isempty(ancestorsOf) && ~isempty(childrenOf)
    [ARA_tableC,tableRowIndsC] = returnChildrenOnly(ARA_table,childrenOf,excludeReferenceArea);
    [ARA_tableA,tableRowIndsA] = returnAncestorsOnly(ARA_table,ancestorsOf,excludeReferenceArea);
    ARA_table = unique([ARA_tableC;ARA_tableA]);
    tableRowInds = unique([tableRowIndsC;tableRowIndsA]);
else
    %No filtering was requested, so every row of the table is returned
    tableRowInds = (1:height(ARA_table))';
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function fInput = checkFilteringInput(fInput)
%Ensure that ancestorsOf or childrenOf are a suitable cell array
if iscell(fInput)
%Do nothing
elseif isvector(fInput) && ischar(fInput)
fInput={fInput};
elseif isnumeric(fInput)
fInput=num2cell(fInput);
else
fprintf('\n *** %s - Unknown value of input variable \n', mfilename)
    help(mfilename)
return
end
function [returnedTable,tableRowInds] = returnAncestorsOnly(ARA_table,ancestorsOf,excludeReferenceArea)
% If the user asked for only the ancestors of an area, we search for these here and
% return an empty array with an on-screen warning if nothing could be found.
%
% Outputs
% returnedTable - the table containing only the ancestors of the selected area
% tableRowInds - these are: returnedTable = ARA_table(tableRowInds,:)
if isempty(ancestorsOf)
returnedTable=ARA_table;
tableRowInds=[];
return
end
childRows=[]; %Rows of the table for which we will find ancestors
for ii=1:length(ancestorsOf)
tChild=ancestorsOf{ii}; %This child for which we will look for parents
if isnumeric(tChild)
childRows=[childRows;find(ARA_table.id==tChild)];
elseif ischar(tChild)
childRows=[childRows;strmatch(tChild,ARA_table.name)];
end
end
%Loop through childRows and collect the table rows of all ancestors
ancestors=[];
for ii=1:length(childRows)
grandpa = ARA_table.structure_id_path(childRows(ii));
grandpa = strsplit(grandpa{1},'/'); %produce a cell array of character arrays that are area index values
grandpa = cell2mat(cellfun(@str2num,grandpa,'UniformOutput',false));
ancestors = [ancestors, grandpa];
end
if excludeReferenceArea
    %Remove the reference areas themselves if the user asked to discard them
for ii = 1:length(childRows)
thisID = ARA_table.id(childRows(ii));
ancestors(ancestors==thisID)=[];
end
end
if isempty(childRows) || isempty(ancestors)
fprintf('\n\n *** NO ANCESTORS FOUND. RETURNING EMPTY ARRAY ***\n\n')
returnedTable=[];
tableRowInds=[];
return
end
ancestors = unique(ancestors);
tableRowInds = zeros(size(ancestors));
for ii=1:length(ancestors)
    tableRowInds(ii)=find(ARA_table.id==ancestors(ii));
end
returnedTable = ARA_table(tableRowInds,:); %filter it
function [returnedTable,tableRowInds] = returnChildrenOnly(ARA_table,childrenOf,excludeReferenceArea)
% If the user asked for only the children of an area, we search for these here and
% return an empty array with an on-screen warning if nothing could be found.
%
% Outputs
% returnedTable - the table containing only the children of the selected area
% tableRowInds - these are: returnedTable = ARA_table(tableRowInds,:)
if isempty(childrenOf)
returnedTable=ARA_table;
tableRowInds=[];
return
end
childRows=[]; %Rows of the table for which we will find children
for ii=1:length(childrenOf)
tChild=childrenOf{ii}; %This child for which we will look for parents
if isnumeric(tChild)
childRows=[childRows;find(ARA_table.id==tChild)];
elseif ischar(tChild)
childRows=[childRows;strmatch(tChild,ARA_table.name)];
end
end
%Get the index values associated with these rows
childRows = unique(childRows);
ind = zeros(size(childRows));
for ii=1:length(childRows)
ind(ii)=ARA_table.id(childRows(ii));
end
% Now we will loop through the whole table and look for rows that list each of these
% values in their structure_id_path
tableRowInds = [];
for thisInd = 1:length(ind)
for thisRow = 1:height(ARA_table)
sID = strsplit(ARA_table.structure_id_path{thisRow},'/');
sID = cell2mat(cellfun(@str2num,sID,'UniformOutput',false));
f=find(sID==ind(thisInd));
if ~isempty(f)
            %Skip the reference area itself if the user asked to discard it
if excludeReferenceArea && max(f)==length(sID) %this works because it returns true when the target area is the last in the list
continue
end
tableRowInds(end+1)=thisRow;
end
end
end
if isempty(tableRowInds)
fprintf('\n\n *** NO CHILDREN FOUND. RETURNING EMPTY ARRAY ***\n\n')
returnedTable=[];
return
end
returnedTable = ARA_table(tableRowInds,:); %filter it
| MATLAB |
3D | Aswendt-Lab/AIDAmri | ARA/AllenBrainAPI-master/examples/thalamus.m | .m | 3,201 | 77 | function thalamus
% make a projection density plot
%
% This is the MATLAB version of the R example found at
% http://api.brain-map.org/examples/doc/thalamus/thalamus.R.html
%
% We have some more robust MATLAB functions that encapsulate
% some operations that were performed in-line in the R example.
% These standalone functions are used where appropriate here.
%
%
% Rob Campbell - Basel 2015
structures = getAllenStructureList; %read structure data as a table
data_sets = url2table('http://api.brain-map.org/api/v2/data/query.csv?criteria=model::SectionDataSet,rma::criteria,[id$in100141219,112423392,127084296,127866392,139426984,146858006,112424813],specimen%28injections%29,rma::options[tabular$eq%27distinct%20data_sets.id%20as%20section_data_set_id,injections.primary_injection_structure_id%27]');
%Here we use arrayfun instead of the R sapply function
data_sets.graph_order = arrayfun(@(x) structures.graph_order(structures.id == x), data_sets.primary_injection_structure_id);
data_sets.acronym = arrayfun(@(x) structures.acronym(structures.id == x), data_sets.primary_injection_structure_id);
data_sets = sortrows(data_sets,'graph_order');
unionizes = url2table('http://api.brain-map.org/api/v2/data/query.csv?criteria=model::ProjectionStructureUnionize,rma::criteria,section_data_set[id$in100141219,112423392,127084296,127866392,139426984,146858006,112424813],structure[acronym$in''VPL'',''VPM'',''PO'',''VAL'',''PF'',''VM'',''CM'',''RH'',''MD'',''PVT'',''RE'',''AM'',''AV'',''AD'',''LD'',''LP'',''LGv'',''LGd'',''MG''],rma::include,structure,rma::options[num_rows$eqall]');
%This is much more long-winded than the R way:
%m = xtabs(projection_volume ~ section_data_set_id + structure_id, unionizes)
setID = unique(unionizes.section_data_set_id);
strID = unique(unionizes.structure_id);
for ii=1:length(setID)
for jj=1:length(strID)
f=find(unionizes.section_data_set_id == setID(ii) & unionizes.structure_id == strID(jj));
m(ii,jj) = sum(unionizes.projection_volume(f));
end
end
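%A more compact alternative to the double loop (a sketch, assuming every
%section_data_set_id and structure_id is present in setID and strID):
% [~,rowIdx] = ismember(unionizes.section_data_set_id, setID);
% [~,colIdx] = ismember(unionizes.structure_id, strID);
% m = accumarray([rowIdx colIdx], unionizes.projection_volume, [numel(setID) numel(strID)]);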
%We now get the row and column names rather than use the ID numbers
row_acronyms = arrayfun(@(x) data_sets.acronym(data_sets.section_data_set_id == x), setID);
col_acronyms = arrayfun(@(x) structures.acronym(structures.id == x), strID);
%Sort the rows and columns by sorted graph_order
[~,row_order] = sort(arrayfun(@(x) data_sets.graph_order(data_sets.section_data_set_id == x), setID));
[~,col_order] = sort(arrayfun(@(x) structures.graph_order(structures.id == x), strID));
om = m(row_order,col_order);
row_acronyms = row_acronyms(row_order);
col_acronyms = col_acronyms(col_order);
%Make the plot
clf
imagesc(om)
set(gca,'YTick',1:size(om,1), 'YTickLabel',row_acronyms, 'XTick',1:size(om,2), 'XTickLabel',col_acronyms)
colormap hot
axis equal tight
xlabel('target structure')
ylabel('primary injection structure')
title('Cortico-thalamic projection')
%----------------------------------------------
function thisTable=url2table(url)
%Read a URL into a table
thalamusTMP=fullfile(tempdir,'thalamus.csv');
urlwrite(url,thalamusTMP);
thisTable=readtable(thalamusTMP);
delete(thalamusTMP) | MATLAB |
3D | zhengfj1994/PPGB_MS2 | smiles_create_dataset.py | .py | 3,732 | 103 | import os
import torch
import math
import pickle
import random
import functools
import multiprocessing
import numpy as np
import os.path as osp
from torch import Tensor
from joblib import Parallel, delayed
from torch_geometric.data import Dataset
from torch_geometric.data import Batch
from utils import pickle_load
from utils import raw_LDS_weight
from smiles_to_pyg_graph import precusor_product_graphs_generation
import dill
from tqdm import tqdm
def create_all_smiles_list(step4FilePath,step5FilePath,int_threshold,bin_size,weighted_power,batch_size,random_seed):
random.seed(random_seed)
step5FilePath_raw = step5FilePath+'/'+'raw'
if not os.path.exists(step5FilePath_raw): os.makedirs(step5FilePath_raw)
step4_list = [pickle_load(step4FilePath + '/' + i) for i in os.listdir(step4FilePath)]
step4_list = [i for k in step4_list for i in k]
step4_list = [_ for _ in step4_list if _[3]['intensity'] > int_threshold]
step4_list = raw_LDS_weight(step4_list, bin_size=bin_size, weighted_power=weighted_power)
random.shuffle(step4_list)
num_groups = len(step4_list) // batch_size
for i in tqdm(range(num_groups), desc="saving"):
group = step4_list[i*batch_size : (i+1)*batch_size]
group_filename = f"{step5FilePath_raw}/raw_{i}.pkl"
if not osp.exists(group_filename):
with open(group_filename, "wb") as f:
pickle.dump(group, f)
    if len(step4_list) % batch_size != 0:
        last_group = step4_list[num_groups*batch_size:]
        last_group_filename = f"{step5FilePath_raw}/raw_{num_groups}.pkl"
        if not osp.exists(last_group_filename):
            with open(last_group_filename, "wb") as f:
                pickle.dump(last_group, f)
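# Illustrative call (hypothetical paths and values, not taken from the pipeline):
# create_all_smiles_list('step4_dir', 'step5_dir', int_threshold=0.01,
#                        bin_size=0.01, weighted_power=0.5,
#                        batch_size=256, random_seed=42)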
def list_collate_fn(batch):
data_list = []
for ith_list in batch:
for data in ith_list:
data_list.append(data)
return Batch.from_data_list(data_list)
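# A minimal usage sketch (assuming a plain torch DataLoader over mydataset,
# whose items are lists of PyG Data objects):
# from torch.utils.data import DataLoader
# loader = DataLoader(mydataset(root='step5_dir'), batch_size=1, collate_fn=list_collate_fn)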
class mydataset(Dataset):
def __init__(self, root, transform = None, pre_transform = None, batch_size = 256, type = 'regression', n_jobs = 12):
self.batch_size = batch_size
self.type = type
self.n_jobs = n_jobs
super(mydataset, self).__init__(root, transform, pre_transform)
@property
def raw_file_names(self):
return os.listdir(self.raw_dir)[0]
@property
def processed_file_names(self):
self.data = pickle_load(self.raw_dir + '/' + os.listdir(self.raw_dir)[0])
return [f'data_{i}.pt' for i in range(len(os.listdir(self.raw_dir)))]
def download(self):
pass
def process(self):
lst1 = os.listdir(self.raw_dir)
Parallel(n_jobs = self.n_jobs, verbose = 1)(delayed(self.process_batch)(i) for i in tqdm(lst1))
def process_batch(self, ith_raw_file):
ith_raw_data = pickle_load(f'{self.raw_dir}/{ith_raw_file}')
file_path = os.path.join(self.processed_dir, f'data_{ith_raw_file.split("_")[-1].split(".")[0]}.pt')
if not osp.exists(file_path):
merged_data = None
for j in ith_raw_data:
data = precusor_product_graphs_generation(j, type = self.type)
if merged_data is None:
merged_data = [data]
else:
merged_data += [data]
torch.save(merged_data, file_path)
def len(self):
return len(os.listdir(self.raw_dir))
def get(self, idx):
data = torch.load(os.path.join(self.processed_dir, f'data_{idx}.pt'))
return data
@property
def feature_size(self):
return self[0].x.shape[1]
@property
def edge_dim(self):
return self[0].edge_attr.shape[1] | Python |
3D | zhengfj1994/PPGB_MS2 | utils3_ms2_matching.py | .py | 1,875 | 49 | import os
import pickle
import pandas as pd
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
from pandas.core.frame import DataFrame
def batch_ms2_matching(MSMSFilePath, step2FilePath, step3FilePath, n_jobs = -1):
if not os.path.exists(step3FilePath): os.makedirs(step3FilePath)
rawCsvData = pd.read_csv(MSMSFilePath, encoding = u'gbk')
csvData = rawCsvData
def my_task2(i):
MSMS = csvData['MSMS'].values[i]
MSMS = DataFrame(MSMS.split(';'))
MSMS = MSMS[0].str.split(' ',expand=True)
MSMS = MSMS[pd.to_numeric(MSMS[1], errors='coerce').isnull() == False]
MSMS = MSMS.apply(pd.to_numeric)
MSMS[1] = MSMS[1]/max(MSMS[1])
fileName = csvData['inchikey'].values[i] + '.pkl'
if fileName not in os.listdir(step2FilePath):
return 'none'
load_file = open(step2FilePath + '/' + fileName,"rb")
try:
fragment_list = pickle.load(load_file)
except:
return 'error'
for _ in fragment_list:
matched_MSMS = MSMS[(MSMS[0]<_.graph['product_mz']+0.01) & (MSMS[0]>_.graph['product_mz']-0.01)][1]
if len(matched_MSMS) == 0:
_.graph['intensity_regression1'] = 0
_.graph['intensity_regression2'] = 0
_.graph['intensity_classification'] = 0
else:
_.graph['intensity_regression1'] = matched_MSMS.mean()
_.graph['intensity_regression2'] = sum(matched_MSMS)/sum(MSMS[1])
_.graph['intensity_classification'] = 1
save_file = open(step3FilePath + '/' + fileName,"wb")
pickle.dump(fragment_list, save_file)
save_file.close()
return i
lst1 = range(csvData.shape[0])
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task2)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | utils5_pyg_graph.py | .py | 10,592 | 192 | import os
import copy
import torch
import pickle
import numpy as np
import networkx as nx
from rdkit import Chem
from torch import Tensor
from utils import pickle_load
from itertools import product
from tqdm.notebook import tqdm
from collections import defaultdict
from joblib import Parallel, delayed
from torch_geometric.data import Data
from torch_geometric.data import HeteroData
def one_hot_k_encode(x, permitted_list):
"""
    x: value to convert to a one-hot vector
    permitted_list: predefined list of permitted values (the last entry is the fallback)
"""
if x not in permitted_list: x = permitted_list[-1]
binary_encoding = [int(boolean_value) for boolean_value in list(map(lambda s: x == s, permitted_list))]
return binary_encoding
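# Example: one_hot_k_encode(7, [6, 7, 8, 'unknown']) -> [0, 1, 0, 0];
# any value absent from the list is mapped onto the trailing 'unknown' slot.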
def process(G, type, is_homo = False):
edges = list(G.edges)
edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous()
node_attrs = list(next(iter(G.nodes(data=True)))[-1].keys())
edge_attrs = list(next(iter(G.edges(data=True)))[-1].keys())
data = defaultdict(list)
for i, (_, feat_dict) in enumerate(G.nodes(data=True)):
for key, value in feat_dict.items():
data[str(key)].append(value)
for i, (_, _, feat_dict) in enumerate(G.edges(data=True)):
for key, value in feat_dict.items():
key = f'edge_{key}' if key in node_attrs else key
data[str(key)].append(value)
for key, value in G.graph.items():
key = f'graph_{key}' if key in node_attrs else key
data[str(key)] = value
for key, value in data.items():
if isinstance(value, (tuple, list)) and isinstance(value[0], Tensor):
data[key] = torch.stack(value, dim=0)
else:
try:
data[key] = torch.tensor(value)
except (ValueError, TypeError):
pass
data['edge_index'] = edge_index.view(2, -1)
data = Data.from_dict(data)
if data.x is None and data.pos is None:
data.num_nodes = G.number_of_nodes()
permitted_list_of_atoms = [6, 7, 8, 16, 15, 9, 17, 35, 53,'unknown']
atomic_num_one_hot = np.zeros((len(data.atomic_num), len(permitted_list_of_atoms)))
for _ in range((len(data.atomic_num))):
atomic_num_one_hot[_, :] = one_hot_k_encode(data.atomic_num[_], permitted_list_of_atoms)
atomic_num_one_hot = torch.tensor(atomic_num_one_hot).to(torch.float32)
formal_charge_one_hot = np.zeros((len(data.formal_charge), 4))
for _ in range((len(data.formal_charge))):
formal_charge_one_hot[_, :] = one_hot_k_encode(data.formal_charge[_], [-1, 0, 1,'unknown'])
formal_charge_one_hot = torch.tensor(formal_charge_one_hot).to(torch.float32)
radical_electrons_one_hot = np.zeros((len(data.radical_electrons), 3))
for _ in range((len(data.radical_electrons))):
radical_electrons_one_hot[_, :] = one_hot_k_encode(data.radical_electrons[_], [0, 1, 'unknown'])
radical_electrons_one_hot = torch.tensor(radical_electrons_one_hot).to(torch.float32)
hybridization_one_hot = np.zeros((len(data.hybridization), 5))
for _ in range((len(data.hybridization))):
hybridization_one_hot[_, :] = one_hot_k_encode(data.hybridization[_], [1, 2, 3, 4, 'unknown'])
hybridization_one_hot = torch.tensor(hybridization_one_hot).to(torch.float32)
total_num_Hs_one_hot = np.zeros((len(data.total_num_Hs), 6))
for _ in range((len(data.total_num_Hs))):
total_num_Hs_one_hot[_, :] = one_hot_k_encode(data.total_num_Hs[_], [0, 1, 2, 3, 4, 'unknown'])
total_num_Hs_one_hot = torch.tensor(total_num_Hs_one_hot).to(torch.float32)
is_in_ring_one_hot = np.zeros((len(data.is_in_ring), 2))
for _ in range((len(data.is_in_ring))):
is_in_ring_one_hot[_, :] = one_hot_k_encode(data.is_in_ring[_], [True, False])
is_in_ring_one_hot = torch.tensor(is_in_ring_one_hot).to(torch.float32)
is_aromatic_one_hot = np.zeros((len(data.is_aromatic), 2))
for _ in range((len(data.is_aromatic))):
is_aromatic_one_hot[_, :] = one_hot_k_encode(data.is_aromatic[_], [True, False])
is_aromatic_one_hot = torch.tensor(is_aromatic_one_hot).to(torch.float32)
is_parent_one_hot = np.zeros((len(data.is_parent), 2))
for _ in range((len(data.is_parent))):
is_parent_one_hot[_, :] = one_hot_k_encode(data.is_parent[_], [True, False])
is_parent_one_hot = torch.tensor(is_parent_one_hot).to(torch.float32)
x = torch.cat([atomic_num_one_hot,
formal_charge_one_hot,
radical_electrons_one_hot,
hybridization_one_hot,
total_num_Hs_one_hot,
is_in_ring_one_hot,
is_aromatic_one_hot,
is_parent_one_hot],dim=1)
bond_type_one_hot = np.zeros((len(data.bond_type), 5))
for _ in range((len(data.bond_type))):
bond_type_one_hot[_, :] = one_hot_k_encode(data.bond_type[_], [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC, 'Fragmentation'])
bond_type_one_hot = torch.tensor(bond_type_one_hot).to(torch.float32)
edge_attr = bond_type_one_hot
    # Edge types: 0 = parent-parent bond, 1 = product-product bond, 2 = fragmentation edge
    is_parent_arr = data.is_parent.numpy()
    edge_type = torch.Tensor([
        0 if is_parent_arr[int(edge_index[0, i])] and is_parent_arr[int(edge_index[1, i])]
        else 1 if not is_parent_arr[int(edge_index[0, i])] and not is_parent_arr[int(edge_index[1, i])]
        else 2
        for i in range(edge_index.shape[1])]).int()
    if type == 'regression1':
        y = torch.tensor(data.intensity_regression1, dtype=torch.float).to(torch.float32)
    elif type == 'regression2':
        y = torch.tensor(data.intensity_regression2, dtype=torch.float).to(torch.float32)
    elif type == 'classification':
        y = torch.tensor(data.intensity_classification, dtype=torch.long)
    elif type == 'prediction':
        y = -1
if is_homo:
homogeneous_data = Data(x=x, edge_attr=edge_attr, edge_type=edge_type, edge_index=edge_index, y=y)
return homogeneous_data
else:
        hetero_data = HeteroData()
        # Partition edges by the is_parent flag of their endpoints: parent-parent
        # bonds, product-product bonds, and parent-product fragmentation edges
        pp = [i for i in range(edge_index.shape[1]) if is_parent_arr[int(edge_index[0, i])] and is_parent_arr[int(edge_index[1, i])]]
        qq = [i for i in range(edge_index.shape[1]) if not is_parent_arr[int(edge_index[0, i])] and not is_parent_arr[int(edge_index[1, i])]]
        pq = [i for i in range(edge_index.shape[1]) if is_parent_arr[int(edge_index[0, i])] != is_parent_arr[int(edge_index[1, i])]]
        hetero_data['parent'].x = x[np.where(is_parent_arr)]
        hetero_data['product'].x = x[np.where(is_parent_arr == False)]
        hetero_data['parent','bond','parent'].edge_index = edge_index[:, pp]
        hetero_data['product','bond','product'].edge_index = edge_index[:, qq]
        hetero_data['parent','fragment','product'].edge_index = edge_index[:, pq]
        hetero_data['parent','bond','parent'].edge_attr = edge_attr[pp]
        hetero_data['product','bond','product'].edge_attr = edge_attr[qq]
        hetero_data['parent','fragment','product'].edge_attr = edge_attr[pq]
        hetero_data.y = y
return hetero_data
def batch_transfer_pyg_graph(step4FilePath, step5FilePath_regression1, step5FilePath_regression2, step5FilePath_classification, MW_threshold, n_jobs = -1):
if not step5FilePath_regression1 is None:
if not os.path.exists(step5FilePath_regression1): os.makedirs(step5FilePath_regression1)
if not step5FilePath_regression2 is None:
if not os.path.exists(step5FilePath_regression2): os.makedirs(step5FilePath_regression2)
if not step5FilePath_classification is None:
if not os.path.exists(step5FilePath_classification): os.makedirs(step5FilePath_classification)
step4Files = os.listdir(step4FilePath)
for i in step4Files:
print(i)
ith_Graph_list = pickle_load(step4FilePath + '/' + i)
ith_Graph_list = [_ for _ in ith_Graph_list if _.graph['product_mz'] >= MW_threshold]
if not step5FilePath_regression1 is None:
data_list_regression1 = Parallel(n_jobs = n_jobs, verbose = 0)(delayed(process)(G, type = 'regression1', is_homo = True) for G in tqdm(ith_Graph_list))
save_file_regression1 = open(step5FilePath_regression1+'/'+i,"wb")
pickle.dump(data_list_regression1, save_file_regression1)
save_file_regression1.close()
if not step5FilePath_regression2 is None:
data_list_regression2 = Parallel(n_jobs = n_jobs, verbose = 0)(delayed(process)(G, type = 'regression2', is_homo = True) for G in tqdm(ith_Graph_list))
save_file_regression2 = open(step5FilePath_regression2+'/'+i,"wb")
pickle.dump(data_list_regression2, save_file_regression2)
save_file_regression2.close()
if not step5FilePath_classification is None:
data_list_classification = Parallel(n_jobs = n_jobs, verbose = 0)(delayed(process)(G, type = 'classification', is_homo = True) for G in tqdm(ith_Graph_list))
save_file_classification = open(step5FilePath_classification+'/'+i,"wb")
pickle.dump(data_list_classification, save_file_classification)
save_file_classification.close()
def batch_transfer_pyg_graph_for_new_molecules(step2FilePath, step5FilePath, MW_threshold, n_jobs = -1):
if not os.path.exists(step5FilePath): os.makedirs(step5FilePath)
step4Files = os.listdir(step2FilePath)
for i in step4Files:
print(i)
ith_Graph_list = pickle_load(step2FilePath + '/' + i)
ith_Graph_list = [_ for _ in ith_Graph_list if _.graph['product_mz'] >= MW_threshold]
data_list_regression = Parallel(n_jobs = n_jobs, verbose = 0)(delayed(process)(G, type = 'prediction', is_homo = True) for G in tqdm(ith_Graph_list))
save_file_regression = open(step5FilePath+'/'+i,"wb")
pickle.dump(data_list_regression, save_file_regression)
save_file_regression.close() | Python |
3D | zhengfj1994/PPGB_MS2 | utils4_graph_augmentation.py | .py | 2,217 | 50 | import os
import copy
import pickle
import numpy as np
import networkx as nx
from itertools import product
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
# networkx
def graph_augmentation(raw_Graph, repeat = 2, augmentation_ratio = 0.6, with_raw_Graph = True):
copyed_Graph = copy.deepcopy(raw_Graph)
Fragmentation_edges = [(_[0],_[1]) for _ in copyed_Graph.edges(data=True) if _[2]['bond_type'] == 'Fragmentation']
    copyed_Graph.remove_edges_from(Fragmentation_edges)
    sub_Graphs_index = list(nx.connected_components(copyed_Graph))
sub_Graphs_index = [list(_) for _ in sub_Graphs_index if copyed_Graph.nodes[list(_)[0]]['is_parent'] == False]
sub_Graphs_index = list(product(sub_Graphs_index, repeat = min(repeat, int(len(sub_Graphs_index) * (1 - augmentation_ratio)))))
sub_Graphs_index = [(set([i for k in _ for i in k])) for _ in sub_Graphs_index]
sub_Graphs_index = [list(t) for t in set(tuple(_) for _ in sub_Graphs_index)]
if with_raw_Graph: augmented_Graphs = [raw_Graph]
else: augmented_Graphs = []
if len(sub_Graphs_index[0]) > 0:
for ithGraph in sub_Graphs_index:
ith_copyed_Graph = copy.deepcopy(raw_Graph)
ith_copyed_Graph.remove_nodes_from(ithGraph)
augmented_Graphs += [nx.disjoint_union(ith_copyed_Graph,nx.Graph())]
return augmented_Graphs
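# Illustrative use on one annotated fragment graph (hypothetical variable G):
# variants = graph_augmentation(G, repeat=2, augmentation_ratio=0.6)
# 'variants' holds the raw graph plus copies with subsets of its product
# sub-graphs removed, multiplying the training examples per fragment.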
def batch_graph_augmentation(step3FilePath, step4FilePath, n_jobs = -1):
if not os.path.exists(step4FilePath): os.makedirs(step4FilePath)
def my_task(i):
load_file = open(step3FilePath + '/' + i,"rb")
try: fragment_list = pickle.load(load_file)
except: return 'error'
augmented_fragment_list = [graph_augmentation(_) if _.graph['intensity_regression1'] else [_] for _ in fragment_list] # Data augmentation
augmented_fragment_list = [i for k in augmented_fragment_list for i in k]
save_file = open(step4FilePath + '/' + i,"wb")
pickle.dump(augmented_fragment_list, save_file)
save_file.close()
return i
lst1 = os.listdir(step3FilePath)
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1))
| Python |
3D | zhengfj1994/PPGB_MS2 | smiles_augmentation.py | .py | 2,560 | 52 | import os
import copy
import math
import random
import pickle
import numpy as np
import networkx as nx
from itertools import product
from itertools import combinations
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
def smiles_augmentation(raw_smiles, repeat, augmentation_ratio, max_augmentation_times):
max_augmentation_times = int(max_augmentation_times)
random.seed(99)
fragments = raw_smiles[1]
repeat = max(len(fragments)-repeat,math.ceil(len(fragments)*augmentation_ratio))
sub_fragments = []
for i in range(repeat,len(fragments)):
sub_fragments += list(combinations(fragments, i))
if len(sub_fragments) > max_augmentation_times:
sub_fragments = random.sample(sub_fragments, max_augmentation_times)
sub_fragments += [tuple(fragments)]
augmented_smiles = []
if len(raw_smiles) == 5:
for i in sub_fragments:
augmented_smiles += [[raw_smiles[0]] + [i] + [raw_smiles[2]] + [raw_smiles[3]] + [raw_smiles[4]]]
else:
for i in sub_fragments:
augmented_smiles += [[raw_smiles[0]] + [i] + [raw_smiles[2]] + [raw_smiles[3]]]
return augmented_smiles
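# Illustrative behaviour (hypothetical record): if raw_smiles[1] holds fragments
# (f1, f2, f3), the output enumerates the same record with fragment subsets such
# as (f1, f2) and (f2, f3), capped at max_augmentation_times, plus the full set.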
def batch_smiles_augmentation(step3FilePath, step4FilePath, repeat_1, augmentation_ratio_1, max_augmentation_times_1, repeat_2, augmentation_ratio_2, max_augmentation_times_2, int_threshold, n_jobs = -1):
if not os.path.exists(step4FilePath): os.makedirs(step4FilePath)
def my_task(i):
load_file = open(step3FilePath + '/' + i,"rb") # open file
try: fragment_list = pickle.load(load_file) # open file
except: return 'error' # if open file failed
augmented_fragment_list = [smiles_augmentation(raw_smiles=_, repeat=repeat_1, augmentation_ratio=augmentation_ratio_1, max_augmentation_times=max_augmentation_times_1) \
if _[3]['intensity']>int_threshold else \
smiles_augmentation(raw_smiles=_, repeat=repeat_2, augmentation_ratio=augmentation_ratio_2, max_augmentation_times=max_augmentation_times_2) \
for _ in fragment_list] # Data augmentation
augmented_fragment_list = [i for k in augmented_fragment_list for i in k]
save_file = open(step4FilePath + '/' + i,"wb") # save file
pickle.dump(augmented_fragment_list, save_file) # save file
save_file.close() # save file
return i
lst1 = os.listdir(step3FilePath)
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | utils6_loss.py | .py | 5,138 | 144 | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.distributions import MultivariateNormal as MVN
class WeightedFocalLoss(nn.Module):
"Non weighted version of Focal Loss"
def __init__(self, alpha=.25, gamma=2):
super(WeightedFocalLoss, self).__init__()
self.alpha = torch.tensor([alpha, 1-alpha]).cuda()
self.gamma = gamma
def forward(self, inputs, targets):
BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
targets = targets.type(torch.long)
at = self.alpha.gather(0, targets.data.view(-1))
pt = torch.exp(-BCE_loss)
F_loss = at*(1-pt)**self.gamma * BCE_loss
return F_loss.mean()
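# Illustrative usage (hypothetical tensors: raw logits and 0/1 targets on the GPU):
# criterion = WeightedFocalLoss(alpha=0.25, gamma=2)
# loss = criterion(logits.flatten(), targets.float())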
class ReweightL2(_Loss):
def __init__(self, train_dist, reweight='inverse'):
super(ReweightL2, self).__init__()
self.reweight = reweight
self.train_dist = train_dist
def forward(self, pred, target):
reweight = self.reweight
prob = self.train_dist.log_prob(target).exp().squeeze(-1)
if reweight == 'inverse':
inv_prob = prob.pow(-1)
elif reweight == 'sqrt_inv':
inv_prob = prob.pow(-0.5)
else:
raise NotImplementedError
inv_prob = inv_prob / inv_prob.sum()
loss = F.mse_loss(pred, target, reduction='none').sum(-1) * inv_prob
loss = loss.sum()
return loss
class GAILossMD(_Loss):
"""
Multi-Dimension version GAI, compatible with 1-D GAI
"""
def __init__(self, init_noise_sigma, gmm):
super(GAILossMD, self).__init__()
self.gmm = gmm
self.gmm = {k: torch.tensor(self.gmm[k]) for k in self.gmm}
self.noise_sigma = torch.nn.Parameter(torch.tensor(init_noise_sigma))
def forward(self, pred, target):
noise_var = self.noise_sigma ** 2
loss = gai_loss_md(pred, target, self.gmm, noise_var)
return loss
def gai_loss_md(pred, target, gmm, noise_var):
I = torch.eye(pred.shape[-1])
mse_term = -MVN(pred, noise_var*I).log_prob(target)
balancing_term = MVN(gmm['means'], gmm['variances']+noise_var*I).log_prob(pred.unsqueeze(1)) + gmm['weights'].log()
balancing_term = torch.logsumexp(balancing_term, dim=1)
loss = mse_term + balancing_term
loss = loss * (2 * noise_var).detach()
return loss.mean()
class BMCLossMD(_Loss):
"""
Multi-Dimension version BMC, compatible with 1-D BMC
"""
def __init__(self, init_noise_sigma):
super(BMCLossMD, self).__init__()
self.noise_sigma = torch.nn.Parameter(torch.tensor(init_noise_sigma))
def forward(self, pred, target):
noise_var = self.noise_sigma ** 2
loss = bmc_loss_md(pred, target, noise_var)
return loss
def bmc_loss_md(pred, target, noise_var):
I = torch.eye(pred.shape[-1]).to('cuda')
logits = MVN(pred.unsqueeze(1), noise_var*I).log_prob(target.unsqueeze(0))
loss = F.cross_entropy(logits, torch.arange(pred.shape[0]).to('cuda'))
loss = loss * (2 * noise_var).detach()
return loss
# https://github.com/YyzHarry/imbalanced-regression
def weighted_mse_loss(inputs, targets, weights=None):
    loss = F.mse_loss(inputs, targets, reduction='none')
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
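# Illustrative usage with the LDS sample weights computed in utils.py
# (hypothetical tensors of matching shape):
# loss = weighted_mse_loss(pred, target, weights=batch_weights)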
# https://github.com/YyzHarry/imbalanced-regression
def weighted_l1_loss(inputs, targets, weights=None):
    loss = F.l1_loss(inputs, targets, reduction='none')
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
# https://github.com/YyzHarry/imbalanced-regression
def weighted_huber_loss(inputs, targets, weights=None, beta=0.5):
l1_loss = torch.abs(inputs - targets)
cond = l1_loss < beta
loss = torch.where(cond, 0.5 * l1_loss ** 2 / beta, l1_loss - 0.5 * beta)
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
# https://github.com/YyzHarry/imbalanced-regression
def weighted_focal_mse_loss(inputs, targets, weights=None, activate='sigmoid', beta=20., gamma=1):
    loss = F.mse_loss(inputs, targets, reduction='none')
loss *= (torch.tanh(beta * torch.abs(inputs - targets))) ** gamma if activate == 'tanh' else \
(2 * torch.sigmoid(beta * torch.abs(inputs - targets)) - 1) ** gamma
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
# https://github.com/YyzHarry/imbalanced-regression
def weighted_focal_l1_loss(inputs, targets, weights=None, activate='sigmoid', beta=20., gamma=1):
    loss = F.l1_loss(inputs, targets, reduction='none')
loss *= (torch.tanh(beta * torch.abs(inputs - targets))) ** gamma if activate == 'tanh' else \
(2 * torch.sigmoid(beta * torch.abs(inputs - targets)) - 1) ** gamma
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
| Python |
3D | zhengfj1994/PPGB_MS2 | smiles_ms2_matching.py | .py | 2,970 | 64 | import os
import pickle
import pandas as pd
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
from pandas.core.frame import DataFrame
def batch_ms2_matching(MSMSFilePath, step2FilePath, step3FilePath, int_threshold, MS2_mz_tolerance, n_jobs = -1):
if not os.path.exists(step3FilePath): os.makedirs(step3FilePath)
rawCsvData = pd.read_csv(MSMSFilePath, encoding = u'gbk')
csvData = rawCsvData
def my_task(i):
InstrumentManufacturer = csvData['InstrumentManufacturer'][i]
IonSource = csvData['IonSource'][i]
DetectorType = csvData['DetectorType'][i]
FragmentationType = csvData['FragmentationType'][i]
CollisionGas = csvData['CollisionGas'][i]
CE = csvData['CE'][i]
KE = csvData['KE'][i]
EBC = csvData['EBC'][i]
AdductType = csvData['AdductType'][i]
step2FileName = csvData['AdductType'].values[i] + '_' +csvData['inchikey'].values[i] + '.pkl'
step3FileName = InstrumentManufacturer + '_' + IonSource + '_' + DetectorType + '_' + FragmentationType + '_' + CollisionGas + '_' + str(CE) + '_' + str(KE) + '_' + str(EBC) + '_' + step2FileName
if step3FileName not in os.listdir(step3FilePath) and step2FileName in os.listdir(step2FilePath): # If ith item is not processed
load_file = open(step2FilePath + '/' + step2FileName,"rb")
try:
fragment_list = pickle.load(load_file)
except:
return 'error'
if 'MSMS' in csvData.columns:
MSMS = csvData['MSMS'].values[i]
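            # Expected 'MSMS' cell format (illustrative values): 'mz1 int1;mz2 int2;...',
            # e.g. '100.0757 23.4;121.0648 999.0' encodes two fragment peaks.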
MSMS = DataFrame(MSMS.split(';'))
MSMS = MSMS[0].str.split(' ',expand=True)
MSMS = MSMS[pd.to_numeric(MSMS[1], errors='coerce').isnull() == False]
MSMS = MSMS.apply(pd.to_numeric)
MSMS = MSMS[MSMS[1] > int_threshold]
if len(MSMS) == 0:
return 'No fragment'
for _ in fragment_list:
if 'MSMS' in csvData.columns:
matched_MSMS = MSMS[(MSMS[0]<_[0]+MS2_mz_tolerance) & (MSMS[0]>_[0]-MS2_mz_tolerance)][1]
if len(matched_MSMS) == 0:
_ += [{'intensity': 0}]
else:
_ += [{'intensity': sum(matched_MSMS)/sum(MSMS[1])}]
_+= [{'InstrumentManufacturer': InstrumentManufacturer, 'IonSource': IonSource, 'DetectorType': DetectorType, 'FragmentationType': FragmentationType,
'CollisionGas': CollisionGas, 'CE': CE, 'KE': KE, 'EBC': EBC, 'AdductType': AdductType}]
save_file = open(step3FilePath + '/' + step3FileName,"wb")
pickle.dump(fragment_list, save_file)
save_file.close()
return i
else:
return 'done'
lst1 = range(csvData.shape[0])
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | utils1_fragmentation.py | .py | 10,794 | 210 | import copy
import pandas as pd
import networkx as nx
from rdkit import Chem
from rdkit.Chem import Descriptors
adduct_dict = {'[M+H]+' : {'mz': 1.00727646677, 'smiles':'.[H+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[H+]' : {'mz': 1.00727646677, 'smiles':'.[H+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M+Na]+': {'mz': 22.989218, 'smiles':'.[Na+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[Na]+': {'mz': 22.989218, 'smiles':'.[Na+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M+K]+' : {'mz': 38.963158, 'smiles':'.[K+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[K]+' : {'mz': 38.963158, 'smiles':'.[K+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M-H]-' : {'mz': -1.00727646677, 'smiles':'.[M-H][-]', 'charge_mz': 0.00054857990924, 'charge_smiles': '.[-]', 'ion_mode':'negative'},
'[M-H][-]' : {'mz': -1.00727646677, 'smiles':'.[M-H][-]', 'charge_mz': 0.00054857990924, 'charge_smiles': '.[-]', 'ion_mode':'negative'},
'[+]':{'mz': -0.00054857990924, 'smiles':'.[+]', 'ion_mode':'positive'},
'[-]':{'mz': 0.00054857990924, 'smiles':'.[-]', 'ion_mode':'negative'}}
def mol_to_nx(mol):
G = nx.Graph()
for atom in mol.GetAtoms():
G.add_node(atom.GetIdx(),
atom_map_num = atom.GetAtomMapNum(),
atomic_num = atom.GetAtomicNum(),
formal_charge = atom.GetFormalCharge(),
radical_electrons = atom.GetNumRadicalElectrons(),
hybridization = atom.GetHybridization(),
total_num_Hs = atom.GetTotalNumHs(),
total_degree = atom.GetTotalDegree(),
mass = atom.GetMass(),
is_in_ring = atom.IsInRing(),
is_aromatic = atom.GetIsAromatic())
for bond in mol.GetBonds():
G.add_edge(bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx(),
bond_type = bond.GetBondType())
return G
def nx_to_mol(G):
mol = Chem.RWMol()
atom_map_num = nx.get_node_attributes(G, 'atom_map_num')
atomic_num = nx.get_node_attributes(G, 'atomic_num')
formal_charge = nx.get_node_attributes(G, 'formal_charge')
radical_electrons = nx.get_node_attributes(G, 'radical_electrons')
hybridization = nx.get_node_attributes(G, 'hybridization')
total_num_Hs = nx.get_node_attributes(G, 'total_num_Hs')
total_degree = nx.get_node_attributes(G, 'total_degree')
is_in_ring = nx.get_node_attributes(G, 'is_in_ring')
is_aromatic = nx.get_node_attributes(G, 'is_aromatic')
node_to_idx = {}
for node in G.nodes():
a=Chem.Atom(atomic_num[node])
a.SetAtomMapNum(atom_map_num[node])
a.SetProp('atomNote', str(atom_map_num[node]))
a.SetFormalCharge(formal_charge[node])
a.SetNumRadicalElectrons(radical_electrons[node])
a.SetHybridization(hybridization[node])
a.SetIsAromatic(is_aromatic[node])
idx = mol.AddAtom(a)
node_to_idx[node] = idx
bond_types = nx.get_edge_attributes(G, 'bond_type')
for edge in G.edges():
first, second = edge
ifirst = node_to_idx[first]
isecond = node_to_idx[second]
bond_type = bond_types[first, second]
mol.AddBond(ifirst, isecond, bond_type)
mol.UpdatePropertyCache()
return mol
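# Illustrative round trip (hypothetical molecule):
# G = mol_to_nx(Chem.MolFromSmiles('CCO'))
# smi = Chem.MolToSmiles(nx_to_mol(G))  # recovers an ethanol SMILES string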
def kekule_graph(graph):
mol = nx_to_mol(graph)
Chem.SanitizeMol(mol)
Chem.Kekulize(mol)
charge_num = Chem.rdmolops.GetFormalCharge(mol)
NumValenceElectrons = Descriptors.NumValenceElectrons(mol)
NumFormalCharges = sum([abs(atom.GetFormalCharge()) for atom in mol.GetAtoms()])
num_h = []
for u in range(mol.GetNumAtoms()):
num_h += [mol.GetAtomWithIdx(u).GetTotalNumHs()]
suppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL | Chem.ALLOW_CHARGE_SEPARATION | Chem.ALLOW_INCOMPLETE_OCTETS)
kekule_graphs = []
for ithMol in suppl:
if ithMol == None:
continue
if not Descriptors.NumValenceElectrons(ithMol) == NumValenceElectrons:
continue
if not sum([abs(atom.GetFormalCharge()) for atom in ithMol.GetAtoms()]) == NumFormalCharges:
continue
ith_num_h = []
for u in range(ithMol.GetNumAtoms()):
ith_num_h += [ithMol.GetAtomWithIdx(u).GetTotalNumHs()]
if not ith_num_h == num_h:
continue
for ithAtom in ithMol.GetAtoms():
ithAtom.SetIsAromatic(False)
ithGraph = mol_to_nx(ithMol)
for ithNode in ithGraph.nodes():
ithGraph.add_node(ithNode, is_parent=graph.nodes[list(graph.nodes())[0]]['is_parent'])
ithGraph.graph = graph.graph
kekule_graphs += [ithGraph]
if len(kekule_graphs) == 0:
return [graph]
else:
return kekule_graphs
def sanitize_graph(graph):
for ithNode in graph.nodes():
graph.nodes[ithNode]['is_aromatic'] = False
mol = nx_to_mol(graph)
Chem.SanitizeMol(mol)
Chem.Kekulize(mol)
sanitized_graph = mol_to_nx(mol)
for ithNode in sanitized_graph.nodes():
sanitized_graph.add_node(ithNode, is_parent=graph.nodes[list(graph.nodes())[0]]['is_parent'])
sanitized_graph.graph = graph.graph
return(sanitized_graph)
def graph_duplicate(fragment_result_list, step = 1):
if len(fragment_result_list) > 1:
if step == 1:
product_SMILES = [fragment_result.graph['product_SMILES'] for fragment_result in fragment_result_list]
parent_mz = [fragment_result.graph['parent_mz'] for fragment_result in fragment_result_list]
info_df = pd.DataFrame({'product_SMILES':product_SMILES, 'parent_mz':parent_mz})
index = sorted(info_df.groupby(['product_SMILES'])['parent_mz'].idxmax())
else:
MW = [round(fragment_result.graph['product_mz'],5) for fragment_result in fragment_result_list]
atom_map_num = [",".join([str(x) for x in sorted(list(nx.get_node_attributes(fragment_result, 'atom_map_num').values()))]) for fragment_result in fragment_result_list] # 列表推导式获取各种信息
parent_mz = [fragment_result.graph['parent_mz'] for fragment_result in fragment_result_list]
info_df = pd.DataFrame({'MW':MW, 'atom_map_num':atom_map_num, 'parent_mz':parent_mz})
index = sorted(info_df.groupby(['MW','atom_map_num'])['parent_mz'].idxmax())
fragment_result_list = [fragment_result_list[i] for i in index]
return(fragment_result_list)
def preprocess_raw_graph(raw_graph):
raw_graph_product_SMILES = raw_graph.graph['product_SMILES']
splited_raw_graph_product_SMILES = raw_graph_product_SMILES.split('.')
if len(splited_raw_graph_product_SMILES) == 1:
charge_position = True
charge_source = []
elif len(splited_raw_graph_product_SMILES) == 2:
charge_position = False
charge_source = splited_raw_graph_product_SMILES[1]
else:
charge_position = 'wrong'
parent_graph = copy.deepcopy(raw_graph)
[parent_graph.remove_node(i) for i in range(int(parent_graph.number_of_nodes())) if not parent_graph.nodes[i]['is_parent']]
product_graph = copy.deepcopy(raw_graph)
[product_graph.remove_node(i) for i in range(int(product_graph.number_of_nodes())) if product_graph.nodes[i]['is_parent']]
product_graph = nx.disjoint_union(product_graph,nx.Graph())
kekuled_product_graphs = kekule_graph(product_graph)
return(charge_position, charge_source, parent_graph, kekuled_product_graphs)
def combind_parent_product_graph(raw_graph, parent_graph, ith_productGraph, fragmentation, adduct, charge_source, charge_position):
ith_subGraph = nx.disjoint_union(parent_graph, ith_productGraph)
atom_map_num = [ith_subGraph.nodes[i]['atom_map_num'] for i in range(ith_subGraph.number_of_nodes())]
unique_atom_map_num = [i for i in list(set(atom_map_num)) if atom_map_num.count(i) == 2]
atom_map_bond = [[id for id, value in enumerate(atom_map_num) if value == i] for i in unique_atom_map_num]
[ith_subGraph.add_edges_from([(i[0], i[1], {'bond_type':'Fragmentation'})]) for i in atom_map_bond]
ith_subGraph.graph['fragment_type'] = raw_graph.graph['fragment_type'] + '+' + fragmentation
ith_subGraph.graph['adduct'] = adduct
ith_subGraph.graph['parent_SMILES'] = raw_graph.graph['product_SMILES']
ith_subGraph.graph['parent_mz'] = raw_graph.graph['product_mz']
if charge_position:
ith_subGraph.graph['product_SMILES'] = Chem.MolToSmiles(nx_to_mol(ith_productGraph))
ith_subGraph.graph['product_mz'] = Descriptors.ExactMolWt(nx_to_mol(ith_productGraph))
else:
ith_subGraph.graph['product_SMILES'] = Chem.MolToSmiles(nx_to_mol(ith_productGraph)) + '.' + charge_source
ith_subGraph.graph['product_mz'] = Descriptors.ExactMolWt(nx_to_mol(ith_productGraph)) + adduct_dict[charge_source]['mz']
return(ith_subGraph)
def raw_graph_generator(smiles,adduct):
mol = Chem.MolFromSmiles(smiles)
Chem.Kekulize(mol)
charge_num = Chem.rdmolops.GetFormalCharge(mol)
for atom in mol.GetAtoms():
atom.SetAtomMapNum(atom.GetIdx())
parent_graph = mol_to_nx(mol)
[parent_graph.add_node(i, is_parent = True) for i in range(parent_graph.number_of_nodes())]
product_graph = mol_to_nx(mol)
[product_graph.add_node(i, is_parent = False) for i in range(product_graph.number_of_nodes())]
raw_graph = nx.disjoint_union(parent_graph,product_graph)
[raw_graph.add_edges_from([(i, i+parent_graph.number_of_nodes(), {'bond_type':'Fragmentation'})]) for i in range(product_graph.number_of_nodes())]
raw_graph.graph['fragment_type'] = 'parent_ion'
raw_graph.graph['adduct'] = adduct
if charge_num == 0:
raw_graph.graph['parent_SMILES'] = Chem.MolToSmiles(mol) + adduct_dict[adduct]['smiles']
raw_graph.graph['parent_mz'] = Descriptors.ExactMolWt(mol) + adduct_dict[adduct]['mz']
raw_graph.graph['product_SMILES'] = Chem.MolToSmiles(mol) + adduct_dict[adduct]['smiles']
raw_graph.graph['product_mz'] = Descriptors.ExactMolWt(mol) + adduct_dict[adduct]['mz']
else:
raw_graph.graph['parent_SMILES'] = Chem.MolToSmiles(mol)
raw_graph.graph['parent_mz'] = Descriptors.ExactMolWt(mol)
raw_graph.graph['product_SMILES'] = Chem.MolToSmiles(mol)
raw_graph.graph['product_mz'] = Descriptors.ExactMolWt(mol)
return(raw_graph) | Python |
3D | zhengfj1994/PPGB_MS2 | utils.py | .py | 4,542 | 107 | import os
import math
import glob
import copy
import random
import shutil
import pickle
import numpy as np
import pandas as pd
from collections import Counter
from scipy.ndimage import convolve1d
from scipy.signal.windows import triang
from scipy.ndimage import gaussian_filter1d
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pickle_load(pkl_file):
    with open(pkl_file, 'rb') as save_file:
        try:
            return pickle.load(save_file)
        except Exception:
            return []
# For LDS (https://github.com/YyzHarry/imbalanced-regression)
def get_lds_kernel_window(kernel, ks, sigma):
assert kernel in ['gaussian', 'triang', 'laplace']
half_ks = (ks - 1) // 2
if kernel == 'gaussian':
base_kernel = [0.] * half_ks + [1.] + [0.] * half_ks
kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))
elif kernel == 'triang':
kernel_window = triang(ks)
else:
laplace = lambda x: np.exp(-abs(x) / sigma) / (2. * sigma)
kernel_window = list(map(laplace, np.arange(-half_ks, half_ks + 1))) / max(map(laplace, np.arange(-half_ks, half_ks + 1)))
return kernel_window
# For LDS (https://github.com/YyzHarry/imbalanced-regression)
def get_bin_idx(label,bin_size,contain_zero=True):
bin1 = int(label/bin_size)
bin2 = math.ceil(label/bin_size)
if contain_zero:
if bin1 == bin2: return bin1
else: return bin1 + 1
else:
if bin1 == bin2: return bin1-1
else: return bin1
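# Examples with bin_size=0.1 and contain_zero=True:
#   get_bin_idx(0.0, 0.1)  -> 0   (zero gets its own bin)
#   get_bin_idx(0.05, 0.1) -> 1   (falls into the first non-zero bin)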
def LDS_weight(data_list,bin_size, weighted_power):
# preds, labels: [Ns,], "Ns" is the number of total samples
labels = [_['y'].numpy().tolist() for _ in data_list]
# assign each label to its corresponding bin (start from 0)
# with your defined get_bin_idx(), return bin_index_per_label: [Ns,]
bin_index_per_label = [get_bin_idx(label,bin_size=bin_size,contain_zero=True) for label in labels]
# calculate empirical (original) label distribution: [Nb,]
# "Nb" is the number of bins
Nb = max(bin_index_per_label) + 1
num_samples_of_bins = dict(Counter(bin_index_per_label))
emp_label_dist = [num_samples_of_bins.get(i, 0) for i in range(Nb)]
# lds_kernel_window: [ks,], here for example, we use gaussian, ks=5, sigma=2
lds_kernel_window = get_lds_kernel_window(kernel='gaussian', ks=5, sigma=2)
# calculate effective label distribution: [Nb,]
eff_label_dist = convolve1d(np.array(emp_label_dist), weights=lds_kernel_window, mode='constant')
# Use re-weighting based on effective label distribution, sample-wise weights: [Ns,]
eff_num_per_label = [eff_label_dist[bin_idx] for bin_idx in bin_index_per_label]
weights = [np.float32(len(data_list) / x) for x in eff_num_per_label]
for i,j in zip(data_list,weights): i['weight'] = j ** weighted_power # calculate the weight of y in loop
return data_list
def raw_LDS_weight(data_list, bin_size, weighted_power):
# preds, labels: [Ns,], "Ns" is the number of total samples
labels = [_[3]['intensity'] for _ in data_list]
CEs = [int(_[4]['CE']) for _ in data_list]
# assign each label to its corresponding bin (start from 0)
# with your defined get_bin_idx(), return bin_index_per_label: [Ns,]
bin_index_per_label = [get_bin_idx(label,bin_size=bin_size,contain_zero=True) for label in labels]
# calculate empirical (original) label distribution: [Nb,]
# "Nb" is the number of bins
Nb = max(bin_index_per_label) + 1
num_samples_of_bins = dict(Counter(bin_index_per_label))
emp_label_dist = [num_samples_of_bins.get(i, 0) for i in range(Nb)]
# lds_kernel_window: [ks,], here for example, we use gaussian, ks=5, sigma=2
lds_kernel_window = get_lds_kernel_window(kernel='gaussian', ks=5, sigma=2)
# calculate effective label distribution: [Nb,]
eff_label_dist = convolve1d(np.array(emp_label_dist), weights=lds_kernel_window, mode='constant')
# Use re-weighting based on effective label distribution, sample-wise weights: [Ns,]
eff_num_per_label = [eff_label_dist[bin_idx] for bin_idx in bin_index_per_label]
# weights_int = [np.float32(len(data_list) / x) for x in eff_num_per_label]
# weights_CE = [CE ** 1 for CE in CEs]
# weights = [x * y for x, y in zip(weights_int, weights_CE)]
weights = [np.float32(len(data_list) / x) for x in eff_num_per_label]
for i,j in zip(data_list,weights): i[3]['weight'] = j ** weighted_power # calculate the weight of y in loop
return data_list | Python |
3D | zhengfj1994/PPGB_MS2 | utils6_training_evaluation.py | .py | 9,075 | 191 | import torch
import pandas as pd
from IPython import display
from matplotlib import pyplot as plt
from matplotlib_inline import backend_inline
from utils6_loss import WeightedFocalLoss
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from utils6_loss import weighted_mse_loss
from utils6_loss import weighted_focal_mse_loss
from utils6_loss import BMCLossMD
class Animator():
def __init__(self, xlim, xlabel=None, ylabel=None, legend=None,ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'),figsize=(3.5, 2.5)) -> None:
self.xlabel = xlabel
self.ylabel = ylabel
self.xscale = xscale
self.yscale = yscale
self.xlim = xlim
self.ylim = ylim
        if legend is None:
            legend = []
        self.legend = legend
backend_inline.set_matplotlib_formats('svg')
self.fig, self.axes = plt.subplots(figsize=figsize)
self.x = None
self.y = None
self.fmts = fmts
def set_axes(self):
self.axes.set_xlabel(self.xlabel)
self.axes.set_ylabel(self.ylabel)
self.axes.set_xscale(self.xscale)
self.axes.set_yscale(self.yscale)
self.axes.set_xlim(self.xlim)
self.axes.set_ylim(self.ylim)
if self.legend:
self.axes.legend(self.legend)
self.axes.grid()
def show(self,x,y):
self.axes.cla()
for i in range(len(x)):
self.axes.plot(x[i],y[i],self.fmts[i])
self.set_axes()
display.display(self.fig)
display.clear_output(wait=True)
def model_training(model, device, train_loader, val_loader, use_metadata, optimizer, scheduler, loss_function, alpha, epochs, best_model_file_path, training_curve_file):
print("Start training")
model.to(device)
if loss_function == 'WeightedFocalLoss': criterion = WeightedFocalLoss(alpha = alpha)
fig = Animator(xlim=(-0.1,epochs+0.1),legend=["train_MAE","val_MAE"])
epoch_x = [[],[]]
training_curve = [[],[]]
if loss_function == 'WeightedFocalLoss':
        best_f1 = 0
else:
best_MAE = 999
model.train()
for epoch in range(epochs):
tot_loss = 0.0
tot_acc = 0.0
train_preds = []
train_trues = []
model.train()
for i,train_batch in enumerate(train_loader):
x, edge_attr, edge_index, edge_type, batch, train_label_batch = train_batch.x.to(device), train_batch.edge_attr.to(device), train_batch.edge_index.to(device), train_batch.edge_type.to(device), train_batch.batch.to(device), train_batch.y.to(device)
if loss_function == 'WeightedFocalLoss':
train_label_batch[train_label_batch > 0] = 1.0
if use_metadata:
MetaData = train_batch.MetaData.to(device)
train_outputs = model(x, edge_attr, edge_index, edge_type, MetaData, batch) # perform a single forward pass
else:
train_outputs = model(x, edge_attr, edge_index, edge_type, batch) # perform a single forward pass
if loss_function == 'WeightedFocalLoss': loss = criterion(train_outputs.flatten(), train_label_batch.float())
elif loss_function == 'weighted_mse_loss': loss = weighted_mse_loss(train_outputs.flatten(), train_label_batch, weights=torch.Tensor(train_batch.weight).to(device))
loss.backward()
optimizer.step()
optimizer.zero_grad()
tot_loss += loss.data
if loss_function == 'WeightedFocalLoss': train_preds.extend((torch.sigmoid(train_outputs) >= 0.5).float().detach().cpu().numpy())
else: train_preds.extend(train_outputs.detach().cpu().numpy())
train_trues.extend(train_label_batch.detach().cpu().numpy())
scheduler.step()
if loss_function == 'WeightedFocalLoss':
train_accuracy = accuracy_score(train_trues, train_preds)
train_precision = precision_score(train_trues, train_preds)
train_recall = recall_score(train_trues, train_preds)
train_f1 = f1_score(train_trues, train_preds)
print("[sklearn_metrics] Epoch:{} loss:{:.4f} accuracy:{:.4f} precision:{:.4f} recall:{:.4f} f1:{:.4f}".format(epoch, tot_loss, train_accuracy, train_precision, train_recall, train_f1))
else:
train_MSE = mean_squared_error(train_trues, train_preds)
train_MAE = mean_absolute_error(train_trues, train_preds)
train_R2 = r2_score(train_trues, train_preds)
print("[sklearn_metrics] Epoch:{} loss:{:.4f}".format(epoch, tot_loss))
print("[sklearn_metrics] dropout_MSE:{:.4f} dropout_MAE:{:.4f} dropout_R2:{:.4f}".format(train_MSE, train_MAE, train_R2))
train_preds = []
train_trues = []
test_preds = []
test_trues = []
model.eval()
with torch.no_grad():
for i,test_batch in enumerate(val_loader):
x, edge_attr, edge_index, edge_type, batch, test_data_label = test_batch.x.to(device), test_batch.edge_attr.to(device), test_batch.edge_index.to(device), test_batch.edge_type.to(device), test_batch.batch.to(device), test_batch.y.to(device)
test_data_label = test_batch.y
if loss_function == 'WeightedFocalLoss':
test_data_label[test_data_label > 0] = 1.0
if use_metadata:
MetaData = test_batch.MetaData.to(device)
test_outputs = model(x, edge_attr, edge_index, edge_type, MetaData, batch) # perform a single forward pass
else:
test_outputs = model(x, edge_attr, edge_index, edge_type, batch) # perform a single forward pass
if loss_function == 'WeightedFocalLoss': test_preds.extend((torch.sigmoid(test_outputs) >= 0.5).float().detach().cpu().numpy())
else: test_preds.extend(test_outputs.detach().cpu().numpy())
test_trues.extend(test_data_label.numpy())
if loss_function == 'WeightedFocalLoss':
test_accuracy = accuracy_score(test_trues, test_preds)
test_precision = precision_score(test_trues, test_preds)
test_recall = recall_score(test_trues, test_preds)
test_f1 = f1_score(test_trues, test_preds)
conf_matrix = confusion_matrix(test_trues, test_preds)
print("[sklearn_metrics] accuracy:{:.4f} precision:{:.4f} recall:{:.4f} f1:{:.4f}".format(test_accuracy, test_precision, test_recall, test_f1))
else:
test_MSE = mean_squared_error(test_trues, test_preds)
test_MAE = mean_absolute_error(test_trues, test_preds)
test_R2 = r2_score(test_trues, test_preds)
print("[sklearn_metrics] test_MSE:{:.4f} test_MAE:{:.4f} test_R2:{:.4f}".format(test_MSE, test_MAE, test_R2))
torch.save(model, best_model_file_path[:best_model_file_path.index(".pt")] + "_realtime" + best_model_file_path[best_model_file_path.index(".pt"):])
if loss_function == 'WeightedFocalLoss':
            if test_f1 > best_f1:
                best_f1 = test_f1
torch.save(model, best_model_file_path)
else:
if test_MAE < best_MAE:
best_MAE = test_MAE
torch.save(model, best_model_file_path)
epoch_x[0].append(epoch)
if loss_function == 'WeightedFocalLoss':
training_curve[0].append(train_f1)
else:
training_curve[0].append(train_MAE)
epoch_x[1].append(epoch)
if loss_function == 'WeightedFocalLoss':
training_curve[1].append(test_f1)
else:
training_curve[1].append(test_MAE)
fig.show(epoch_x,training_curve)
trainResult = pd.DataFrame(training_curve).T
if loss_function == 'WeightedFocalLoss':
trainResult.columns = ['train_F1','val_F1']
else:
trainResult.columns = ['train_MAE','val_MAE']
trainResult.to_csv(training_curve_file)
return training_curve
def model_predict(data, model, device, loss_function, use_metadata):
    model.to(device)
    model.eval()  # ensure dropout is inactive at inference time
    with torch.no_grad():
x, edge_attr, edge_index, edge_type, batch = data.x.to(device), data.edge_attr.to(device), data.edge_index.to(device), data.edge_type.to(device), data.batch.to(device)
if use_metadata:
MetaData = data.MetaData.to(device)
out = model(x, edge_attr, edge_index, edge_type, MetaData, batch)
else:
out = model(x, edge_attr, edge_index, edge_type, batch)
if loss_function == 'WeightedFocalLoss': pred = (torch.sigmoid(out) >= 0.5).float().flatten()
else: pred = out
return pred | Python |
3D | zhengfj1994/PPGB_MS2 | smiles_grouping.py | .py | 4,653 | 71 | import os
import pickle
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
adduct_dict = {'[M+H]+' : {'mz': 1.00727646677, 'smiles':'.[H+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[H+]' : {'mz': 1.00727646677, 'smiles':'.[H+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M+Na]+': {'mz': 22.989218, 'smiles':'.[Na+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[Na]+': {'mz': 22.989218, 'smiles':'.[Na+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M+K]+' : {'mz': 38.963158, 'smiles':'.[K+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[K]+' : {'mz': 38.963158, 'smiles':'.[K+]', 'charge_mz': -0.00054857990924, 'charge_smiles': '.[+]', 'ion_mode':'positive'},
'[M-H]-' : {'mz': -1.00727646677, 'smiles':'.[M-H][-]', 'charge_mz': 0.00054857990924, 'charge_smiles': '.[-]', 'ion_mode':'negative'},
'[M-H][-]' : {'mz': -1.00727646677, 'smiles':'.[M-H][-]', 'charge_mz': 0.00054857990924, 'charge_smiles': '.[-]', 'ion_mode':'negative'},
'[+]':{'mz': -0.00054857990924, 'smiles':'.[+]', 'ion_mode':'positive'},
'[-]':{'mz': 0.00054857990924, 'smiles':'.[-]', 'ion_mode':'negative'}}
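# Worked example (a sketch using the table above): glucose C6H12O6 has a
# monoisotopic mass of 180.06339 Da, so its [M+H]+ ion is observed at
# 180.06339 + 1.00727646677 ≈ 181.07067 m/z, i.e. the neutral exact mass
# plus the adduct's 'mz' entry.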
def smiles_grouping(fragmentation_as_reaction_list, adduct, mz_threshold):
ionMode = adduct_dict[adduct]['ion_mode']
adduct_MW = adduct_dict[adduct]['mz']
precusor_smiles = fragmentation_as_reaction_list[0]
product_smiles_list = fragmentation_as_reaction_list[1]
product_mol_list = [Chem.MolFromSmiles(i) for i in product_smiles_list]
product_charge_list = [sum([j.GetFormalCharge() for j in i.GetAtoms()]) if i is not None else 99 for i in product_mol_list]
df = pd.DataFrame({'product_smiles_list':product_smiles_list, 'product_mol_list':product_mol_list, 'product_charge_list':product_charge_list})
if ionMode == 'positive':
df_charge = df.loc[(df['product_charge_list'] == 0) | (df['product_charge_list'] == 1)]
elif ionMode == 'negative':
df_charge = df.loc[(df['product_charge_list'] == 0) | (df['product_charge_list'] == -1)]
product_smiles_list = df_charge['product_smiles_list'].tolist()
product_mol_list = df_charge['product_mol_list'].tolist()
product_charge_list = df_charge['product_charge_list'].tolist()
product_MW_list = [round(Descriptors.ExactMolWt(Chem.MolFromSmiles(i))+adduct_MW, 5) if j == 0 else round(Descriptors.ExactMolWt(Chem.MolFromSmiles(i)),5) for i,j in zip(product_smiles_list, product_charge_list)]
    df2 = pd.DataFrame({"smiles":product_smiles_list,"product_mz":product_MW_list}) # Create a dataframe with product SMILES and product m/z
group = df2.groupby("product_mz") # Group the dataframe with product mz
group_result = [[ithGroup[0],list(ithGroup[1]['smiles']),precusor_smiles] for ithGroup in group]
return [i for i in group_result if i[0] > mz_threshold]
if __name__=='__main__':
load_file = open('training data/NIST 20/Agilent 6530 Q-TOF/Step 1/AABLHGPVOULICI-ZOFKVTQNSA-N.pkl',"rb") # open file
fragmentation_as_reaction_list = pickle.load(load_file) # open file
    test = smiles_grouping(fragmentation_as_reaction_list, adduct='[M+H]+', mz_threshold=25) # mz_threshold was missing; 25 mirrors the batch default below
def batch_smiles_grouping(step1FilePath, step2FilePath, adducts = ['[M+H]+','[M-H]-','[M+Na]+','[M+K]+'], mz_threshold = 25, n_jobs = -1):
if not os.path.exists(step2FilePath): os.makedirs(step2FilePath)
for ithAdduct in adducts:
def my_task(i):
if ithAdduct + '_' + i not in os.listdir(step2FilePath): # If ith item is not processed
load_file = open(step1FilePath + '/' + i,"rb") # open file
try: fragmentation_as_reaction_list = pickle.load(load_file) # open file
except: return 'error' # if open file failed
group_result = smiles_grouping(fragmentation_as_reaction_list, adduct = ithAdduct, mz_threshold = mz_threshold) # Merge by group
save_file = open(step2FilePath + '/' + ithAdduct + '_' + i,"wb") # save file
pickle.dump(group_result, save_file) # save file
save_file.close() # save file
return i
else:
return 'done'
lst1 = os.listdir(step1FilePath)
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | utils2_graph_grouping.py | .py | 4,908 | 93 | import os
import pickle
import itertools
import pandas as pd
import networkx as nx
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
def disjoint_all_graph(GraphList, MSMS, argumentation = False):
if argumentation:
disjointed_graph_list = []
sub_GraphLists = []
for i in range(max(1,len(GraphList)-2), len(GraphList)+1):
for combo in itertools.combinations(GraphList, i):
sub_GraphLists.append(combo)
for ithGraphList in sub_GraphLists:
ithGraphList = list(ithGraphList)
fragment_type = [_.graph['fragment_type'] for _ in ithGraphList]
adduct = [_.graph['adduct'] for _ in ithGraphList]
parent_SMILES = [_.graph['parent_SMILES'] for _ in ithGraphList]
parent_mz = [_.graph['parent_mz'] for _ in ithGraphList]
product_SMILES = [_.graph['product_SMILES'] for _ in ithGraphList]
disjointed_graph = nx.disjoint_union_all(ithGraphList)
for _ in disjointed_graph.edges(data=True):
if _[2]['bond_type'] == 'Fragmentation':
disjointed_graph.add_edges_from([(_[1], disjointed_graph.nodes(data=True)[_[1]]['atom_map_num'], {'bond_type':'Fragmentation'})])
delete_index = [_ for _ in range(ithGraphList[0].number_of_nodes(),disjointed_graph.number_of_nodes()) if disjointed_graph.nodes(data=True)[_]['is_parent']]
disjointed_graph.remove_nodes_from(delete_index)
            disjointed_graph = nx.disjoint_union(disjointed_graph,nx.Graph()) # union with an empty graph relabels nodes to consecutive integers
disjointed_graph.graph['fragment_type'] = fragment_type
disjointed_graph.graph['adduct'] = adduct
disjointed_graph.graph['parent_SMILES'] = parent_SMILES
disjointed_graph.graph['parent_mz'] = parent_mz
disjointed_graph.graph['product_SMILES'] = product_SMILES
disjointed_graph.graph['argumentation_ratio'] = len(ithGraphList)/len(GraphList)
disjointed_graph_list.append(disjointed_graph)
return disjointed_graph_list
else:
fragment_type = [_.graph['fragment_type'] for _ in GraphList]
adduct = [_.graph['adduct'] for _ in GraphList]
parent_SMILES = [_.graph['parent_SMILES'] for _ in GraphList]
parent_mz = [_.graph['parent_mz'] for _ in GraphList]
product_SMILES = [_.graph['product_SMILES'] for _ in GraphList]
disjointed_graph = nx.disjoint_union_all(GraphList)
for _ in disjointed_graph.edges(data=True):
if _[2]['bond_type'] == 'Fragmentation':
disjointed_graph.add_edges_from([(_[1], disjointed_graph.nodes(data=True)[_[1]]['atom_map_num'], {'bond_type':'Fragmentation'})])
delete_index = [_ for _ in range(GraphList[0].number_of_nodes(),disjointed_graph.number_of_nodes()) if disjointed_graph.nodes(data=True)[_]['is_parent']] # Find nodes need to delete and delete them
disjointed_graph.remove_nodes_from(delete_index)
disjointed_graph = nx.disjoint_union(disjointed_graph,nx.Graph())
disjointed_graph.graph['fragment_type'] = fragment_type
disjointed_graph.graph['adduct'] = adduct
disjointed_graph.graph['parent_SMILES'] = parent_SMILES
disjointed_graph.graph['parent_mz'] = parent_mz
disjointed_graph.graph['product_SMILES'] = product_SMILES
disjointed_graph.graph['argumentation_ratio'] = 1
return disjointed_graph
def group_disjoint(fragment_list, MSMS, argumentation):
product_mz = [_.graph['product_mz'] for _ in fragment_list]
df=pd.DataFrame({
"graph":fragment_list,
"product_mz":product_mz
})
group = df.groupby("product_mz")
if argumentation:
        list_2d = [disjoint_all_graph(list(_[1]['graph']),MSMS=MSMS,argumentation=True) for _ in group] # propagate the augmentation flag (was hard-coded to False)
flat_list = [item for sublist in list_2d for item in (sublist if isinstance(sublist, list) else [sublist])]
return flat_list
else:
return [disjoint_all_graph(list(_[1]['graph']),MSMS=MSMS,argumentation=False) for _ in group]
def batch_graph_grouping(step1FilePath, step2FilePath, n_jobs = -1):
if not os.path.exists(step2FilePath): os.makedirs(step2FilePath)
def my_task(i):
if i not in os.listdir(step2FilePath):
load_file = open(step1FilePath + '/' + i,"rb")
try: fragment_list = pickle.load(load_file)
except: return 'error'
grouped_graph = group_disjoint(fragment_list, MSMS=None, argumentation=True)
save_file = open(step2FilePath + '/' + i,"wb")
pickle.dump(grouped_graph, save_file)
save_file.close()
return i
else:
return 'done'
lst1 = os.listdir(step1FilePath)
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | utils6_model.py | .py | 3,975 | 86 | import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from pydantic.dataclasses import dataclass
from torch_geometric.nn import RGCNConv, TransformerConv
from torch_geometric.nn import global_max_pool
from torch_geometric.nn import global_add_pool
from torch_geometric.nn import global_mean_pool
from torch_geometric.nn import BatchNorm
from torch_geometric.nn.aggr import Set2Set
from torch.nn import Linear, BatchNorm1d, ModuleList
from torch_geometric.nn import TransformerConv, TopKPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
class RGCN(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, graph_pooling, loss_function):
super(RGCN, self).__init__()
torch.manual_seed(10)
self.convs = torch.nn.ModuleList()
self.convs.append(RGCNConv(input_channels,100,num_relations=3))
self.convs.append(RGCNConv(100,hidden_channels,num_relations=3))
for _ in range(2): self.convs.append(RGCNConv(hidden_channels,hidden_channels,num_relations=3))
self.norms = torch.nn.ModuleList()
self.norms.append(BatchNorm(100))
self.norms.append(BatchNorm(hidden_channels))
for _ in range(2): self.norms.append(BatchNorm(hidden_channels))
for i in range(4): init.xavier_uniform_(self.convs[i].weight)
if graph_pooling == "sum": self.pool = global_add_pool
elif graph_pooling == "mean": self.pool = global_mean_pool
elif graph_pooling == "max": self.pool = global_max_pool
elif graph_pooling == "set2set": self.pool = Set2Set(hidden_channels, processing_steps=2)
else: raise ValueError("Invalid graph pooling type.")
if graph_pooling == "set2set": self.lin1 = nn.Linear(2*hidden_channels + 38, 100)
else: self.lin1 = nn.Linear(hidden_channels + 38, 100)
self.lin2 = nn.Linear(100, 1)
def forward(self, x, edge_attr, edge_index, edge_type, MetaData, batch):
for conv, norm in zip(self.convs, self.norms):
x = norm(conv(x, edge_index, edge_type))
x = self.pool(x, batch)
x = torch.cat([x, MetaData], dim=1)
x = self.lin1(x)
x = F.dropout(x, p = 0.5, training=self.training)
x = self.lin2(x)
return x
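# Minimal construction sketch (the hyperparameter values are illustrative
# assumptions, not the repository's settings):
#
#   model = RGCN(input_channels=25, hidden_channels=200,
#                graph_pooling='mean', loss_function='weighted_mse_loss')
#   # forward(x, edge_attr, edge_index, edge_type, MetaData, batch) returns a
#   # [num_graphs, 1] tensor; MetaData must have 38 columns to match the
#   # `hidden_channels + 38` input width of self.lin1.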
class RGCN_without_metadata(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, graph_pooling, loss_function):
super(RGCN_without_metadata, self).__init__()
torch.manual_seed(10)
self.convs = torch.nn.ModuleList()
self.convs.append(RGCNConv(input_channels,100,num_relations=3))
self.convs.append(RGCNConv(100,hidden_channels,num_relations=3))
for _ in range(2): self.convs.append(RGCNConv(hidden_channels,hidden_channels,num_relations=3))
self.norms = torch.nn.ModuleList()
self.norms.append(BatchNorm(100))
self.norms.append(BatchNorm(hidden_channels))
for _ in range(2): self.norms.append(BatchNorm(hidden_channels))
for i in range(4): init.xavier_uniform_(self.convs[i].weight)
if graph_pooling == "sum": self.pool = global_add_pool
elif graph_pooling == "mean": self.pool = global_mean_pool
elif graph_pooling == "max": self.pool = global_max_pool
elif graph_pooling == "set2set": self.pool = Set2Set(hidden_channels, processing_steps=2)
else: raise ValueError("Invalid graph pooling type.")
if graph_pooling == "set2set": self.lin1 = nn.Linear(2*hidden_channels, 100)
else: self.lin1 = nn.Linear(hidden_channels, 100)
self.lin2 = nn.Linear(100, 1)
def forward(self, x, edge_attr, edge_index, edge_type, batch):
for conv, norm in zip(self.convs, self.norms):
x = norm(conv(x, edge_index, edge_type))
x = self.pool(x, batch)
x = self.lin1(x)
x = F.dropout(x, p = 0.5, training=self.training)
x = self.lin2(x)
return x | Python |
3D | zhengfj1994/PPGB_MS2 | smiles_to_pyg_graph.py | .py | 8,935 | 217 | import os
import torch
import pickle
import torch_geometric
from utils import pickle_load
from tqdm.notebook import tqdm
from rdkit import Chem, RDLogger
from joblib import Parallel, delayed
from torch_geometric.data import Data
def one_hot_k_encode(x, permitted_list):
if x not in permitted_list: x = permitted_list[-1]
binary_encoding = [int(boolean_value) for boolean_value in list(map(lambda s: x == s, permitted_list))]
return binary_encoding
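# Example: one_hot_k_encode('SP2', x_map['hybridization']) returns
# [0, 0, 0, 1, 0, 0, 0, 0]; values outside the permitted list fall into the
# last (catch-all) category.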
x_map = {
'atomic_num':
list(range(0, 119)),
'chirality': [
'CHI_UNSPECIFIED',
'CHI_TETRAHEDRAL_CW',
'CHI_TETRAHEDRAL_CCW',
'CHI_OTHER',
'CHI_TETRAHEDRAL',
'CHI_ALLENE',
'CHI_SQUAREPLANAR',
'CHI_TRIGONALBIPYRAMIDAL',
'CHI_OCTAHEDRAL',
],
'degree':
list(range(0, 11)),
'formal_charge':
list(range(-5, 7)),
'num_hs':
list(range(0, 9)),
'num_radical_electrons':
list(range(0, 5)),
'hybridization': [
'UNSPECIFIED',
'S',
'SP',
'SP2',
'SP3',
'SP3D',
'SP3D2',
'OTHER',
],
'is_aromatic': [False, True],
'is_in_ring': [False, True],
}
e_map = {
'bond_type': [
'SINGLE',
'DOUBLE',
'TRIPLE',
'AROMATIC'
]
}
meta_map = {
'InstrumentManufacturer': ['Sciex', 'Thremo', 'Agilent', 'Bruker', 'Shimadzu', 'Waters', 'Others'],
'IonSource': ['ESI', 'APCI', 'EI', 'Others'],
'DetectorType': ['TOF', 'Orbitrap', 'Others'],
'FragmentationType': ['CID', 'HCD', 'EAD', 'Others'],
'CollisionGas': ['N2', 'He', 'Others'],
'AdductType': ['[M+H]+', '[M-H]-', '[M+Na]+', '[M+H-H2O]+', '[M+NH4]+', '[M+FA-H]-', '[2M+H]+', '[2M-H]-', '[M-2H2O+H]+', '[M]+', '[M+Cl]-', '[M-H2O-H]-', '[M+K]+', 'Others']
}
def from_smiles(smiles: str, precusor: bool = True) -> 'torch_geometric.data.Data':
r"""Converts a SMILES string to a :class:`torch_geometric.data.Data`
instance.
Args:
smiles (str): The SMILES string.
with_hydrogen (bool, optional): If set to :obj:`True`, will store
hydrogens in the molecule graph. (default: :obj:`False`)
kekulize (bool, optional): If set to :obj:`True`, converts aromatic
bonds to single/double bonds. (default: :obj:`False`)
"""
RDLogger.DisableLog('rdApp.*')
    precusor_mol = Chem.MolFromSmiles(smiles, sanitize = True)
    if precusor_mol is None:
        # fall back to an empty molecule so downstream code still gets a Mol
        precusor_mol = Chem.MolFromSmiles('')
    Chem.SanitizeMol(precusor_mol)
xs = []
for atom in precusor_mol.GetAtoms():
x = []
if precusor:
x += [0]
else:
x += [1]
x.append(atom.GetAtomMapNum())
x.append(x_map['atomic_num'].index(atom.GetAtomicNum()))
        x += one_hot_k_encode(str(atom.GetChiralTag()),x_map['chirality']) # str() so the RDKit enum matches the string categories above
x.append(x_map['degree'].index(atom.GetTotalDegree()))
x.append(x_map['formal_charge'].index(atom.GetFormalCharge()))
x.append(x_map['num_hs'].index(atom.GetTotalNumHs()))
x.append(x_map['num_radical_electrons'].index(atom.GetNumRadicalElectrons()))
        x += one_hot_k_encode(str(atom.GetHybridization()),x_map['hybridization']) # str() so the RDKit enum matches the string categories above
x.append(x_map['is_aromatic'].index(atom.GetIsAromatic()))
x.append(x_map['is_in_ring'].index(atom.IsInRing()))
xs.append(x)
x = torch.tensor(xs, dtype=torch.float).view(-1,26)
edge_indices, edge_attrs, edge_type = [], [], []
for bond in precusor_mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
e = []
e.append(e_map['bond_type'].index(str(bond.GetBondType())))
edge_indices += [[i, j], [j, i]]
edge_attrs += [e, e]
if precusor:
edge_type += [0, 0]
else:
edge_type += [1, 1]
edge_index = torch.tensor(edge_indices)
edge_index = edge_index.t().to(torch.long).view(2, -1)
edge_attr = torch.tensor(edge_attrs, dtype=torch.long)
if edge_index.numel() > 0: # Sort indices.
perm = (edge_index[0] * x.size(0) + edge_index[1]).argsort()
edge_index, edge_attr = edge_index[:, perm], edge_attr[perm]
return Data(x=x, edge_attr=edge_attr, edge_type=edge_type, edge_index=edge_index)
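# Usage sketch (benzene as an arbitrary example molecule):
#
#   data = from_smiles('c1ccccc1', precusor=True)
#   # data.x -> [6, 26] node features; data.edge_index -> [2, 12] (6 aromatic
#   # bonds, both directions); data.edge_type -> [0] * 12 for a precursor.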
def precusor_product_graph_combination(precusor_graph, product_graphs):
fragmentation_edge_attr = torch.Tensor([4]).view(-1, 1)
combined_graph = precusor_graph
for ith_product_graph in product_graphs:
combined_graph = Data(x = torch.cat([combined_graph['x'], ith_product_graph['x']]),
edge_index = torch.cat([combined_graph['edge_index'], ith_product_graph['edge_index']+combined_graph['x'].shape[0]],dim=1),
edge_attr = torch.cat([combined_graph['edge_attr'], ith_product_graph['edge_attr']]),
edge_type = combined_graph['edge_type']+ith_product_graph['edge_type'],)
isPrecusor = combined_graph['x'][:,0].numpy().tolist()
atom_map_num = combined_graph['x'][:,1].numpy().tolist()
raw_edge_index = combined_graph['edge_index']
raw_edge_attr = combined_graph['edge_attr']
raw_edge_type = combined_graph['edge_type']
for ith_isPrecusor, ith_atom_map_num in zip(isPrecusor, atom_map_num):
if ith_isPrecusor == 1: break
precusor_product_map = [i for i, x in enumerate(atom_map_num) if x == ith_atom_map_num]
if len(precusor_product_map) == 1:
continue
else:
for ith_pair in range(1, len(precusor_product_map)):
temp_edge_index = torch.Tensor([[precusor_product_map[ith_pair]], [precusor_product_map[0]]]).to(torch.long)
raw_edge_index = torch.cat((raw_edge_index, temp_edge_index), dim=1)
raw_edge_attr = torch.cat((raw_edge_attr, fragmentation_edge_attr), dim=0)
raw_edge_type += [2]
combined_graph['x'] = torch.cat((combined_graph['x'][:, 2:],combined_graph['x'][:, :1]), dim=1)
combined_graph['edge_attr'] = raw_edge_attr
combined_graph['edge_type'] = torch.Tensor(raw_edge_type).long()
combined_graph['edge_index'] = raw_edge_index.view(2, -1)
return combined_graph
def precusor_product_graphs_generation(raw_data, type):
precusor_smiles = raw_data[2]
product_smiles = raw_data[1]
precusor_graph = from_smiles(smiles=precusor_smiles, precusor=True)
product_graphs = [from_smiles(smiles=i, precusor=False) for i in product_smiles]
precusor_product_graph = precusor_product_graph_combination(precusor_graph=precusor_graph, product_graphs=product_graphs)
if type == 'prediction':
precusor_product_graph['mz'] = torch.tensor(raw_data[0], dtype=torch.float).to(torch.float32)
else:
precusor_product_graph['weight'] = torch.tensor(raw_data[3]['weight'], dtype=torch.float).to(torch.float32)
precusor_product_graph['y'] = torch.tensor(raw_data[3]['intensity'], dtype=torch.float).to(torch.float32)
InstrumentManufacturer = torch.tensor(one_hot_k_encode(raw_data[4]['InstrumentManufacturer'],meta_map['InstrumentManufacturer']))
IonSource = torch.tensor(one_hot_k_encode(raw_data[4]['IonSource'],meta_map['IonSource']))
DetectorType = torch.tensor(one_hot_k_encode(raw_data[4]['DetectorType'],meta_map['DetectorType']))
FragmentationType = torch.tensor(one_hot_k_encode(raw_data[4]['FragmentationType'],meta_map['FragmentationType']))
CollisionGas = torch.tensor(one_hot_k_encode(raw_data[4]['CollisionGas'],meta_map['CollisionGas']))
AdductType = torch.tensor(one_hot_k_encode(raw_data[4]['AdductType'],meta_map['AdductType']))
CE = torch.unsqueeze(torch.tensor(raw_data[4]['CE'], dtype=torch.float).to(torch.float32),dim=0)
KE = torch.unsqueeze(torch.tensor(raw_data[4]['KE'], dtype=torch.float).to(torch.float32),dim=0)
EBC = torch.unsqueeze(torch.tensor(raw_data[4]['EBC'], dtype=torch.float).to(torch.float32),dim=0)
precusor_product_graph['MetaData'] = torch.cat((InstrumentManufacturer,IonSource,DetectorType,FragmentationType,CollisionGas,AdductType,CE,KE,EBC), dim=0).unsqueeze(0)
return precusor_product_graph
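# Expected raw_data layout, inferred from the indexing above (treat this as an
# assumption rather than a documented contract):
#   raw_data[0] = product m/z, raw_data[1] = product SMILES list,
#   raw_data[2] = precursor SMILES, raw_data[3] = {'weight', 'intensity'},
#   raw_data[4] = metadata dict (instrument fields plus CE/KE/EBC).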
def batch_transfer_pyg_graph(step4FilePath, step5FilePath, MW_threshold, n_jobs = -1):
if not os.path.exists(step5FilePath): os.makedirs(step5FilePath)
step4Files = os.listdir(step4FilePath)
def my_task(i):
ith_Graph_list = pickle_load(step4FilePath + '/' + i)
        ith_Graph_list = [_ for _ in ith_Graph_list if _[0] >= MW_threshold] # keep only fragment groups at or above the m/z threshold
if i not in os.listdir(step5FilePath):
data_list = precusor_product_graphs_generation(ith_Graph_list, type = 'regression')
save_file = open(step5FilePath+'/'+i,"wb")
pickle.dump(data_list, save_file)
save_file.close()
lst1 = step4Files
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | zhengfj1994/PPGB_MS2 | smiles_fragmentation.py | .py | 15,013 | 236 | import os
import pickle
import pandas as pd
from rdkit import Chem
from itertools import chain
from tqdm.notebook import tqdm
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from joblib import Parallel, delayed
########################################## fragmentation reaction
crf1_0 = AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0:1][*+0:2]>>([*+0:0]=[*+0:1].[*+0:2])')
crf1_1 = AllChem.ReactionFromSmarts('[*+0;!H0:0]=[*+0:1][*+0:2]>>([*+0:0]#[*+0:1].[*+0:2])')
crf2_0 = AllChem.ReactionFromSmarts('[*+0:0][*+0:1][*!H0+0:2]>>([*+0:0].[*+0:1]=[*+0:2])')
crf2_1 = AllChem.ReactionFromSmarts('[*+0:0][*+0:1]=[*!H0+0:2]>>([*+0:0].[*+0:1]#[*+0:2])')
crf3_0 = AllChem.ReactionFromSmarts('[*+0:0]1[*+0:1]=[*+0:2][*+0:3][*+0:4][*+0:5]1>>([*+0:0]=[*+0:1][*+0:2]=[*+0:3].[*+0:4]=[*+0:5])')
crf3_1 = AllChem.ReactionFromSmarts('[*+0:0]1[*+0:1]=[*+0:2][*+0:3][*+0:4]=[*+0:5]1>>([*+0:0]=[*+0:1][*+0:2]=[*+0:3].[*+0:4]#[*+0:5])')
crf4_0 = AllChem.ReactionFromSmarts('[*+0:0]=[*+0:1][*+0:2][*+0:3][*+0;!H0:4]>>([*+0:0][*+0:1]=[*+0:2].[*+0:3]=[*+0:4])')
crf5_0 = AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0:1][*+0:2][*+0;!H0:3]>>([*+0:0]=[*+0:1].[*+0:2]=[*+0:3])')
crf6_0 = AllChem.ReactionFromSmarts('[*+0:0]=[*+0:1][*+0:2]=[*+0:3][*+0:4]=[*+0:5][*+0:6]=[*+0:7]>>([*+0:0]=[*+0:7].[*+0:2]=[*+0:3][*+0:4]=[*+0:5][*+0:6]=[*+0:1])')
crf7_0 = AllChem.ReactionFromSmarts('[*+0:0]1[*+0:1][*+0:2][*+0:3][*+0:4][*+0:5]1>>([*+0:0]=[*+0:1].[*+0:2]=[*+0:3].[*+0:4]=[*+0:5])')
crf7_1 = AllChem.ReactionFromSmarts('[*+0:0]1=[*+0:1][*+0:2][*+0:3][*+0:4][*+0:5]1>>([*+0:0]#[*+0:1].[*+0:2]=[*+0:3].[*+0:4]=[*+0:5])')
crf7_2 = AllChem.ReactionFromSmarts('[*+0:0]1=[*+0:1][*+0:2]=[*+0:3][*+0:4][*+0:5]1>>([*+0:0]#[*+0:1].[*+0:2]#[*+0:3].[*+0:4]=[*+0:5])')
crf7_3 = AllChem.ReactionFromSmarts('[*+0:0]1=[*+0:1][*+0:2]=[*+0:3][*+0:4]=[*+0:5]1>>([*+0:0]#[*+0:1].[*+0:2]#[*+0:3].[*+0:4]#[*+0:5])')
crf8_0 = AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0:1]=[*+0:2]>>([*+0:0]=[*+0:1][*+0:2])')
crf8_1 = AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0:1]=[*+0:2][*+0:3]=[*+0:4]>>([*+0:0]=[*+0:1][*+0:2]=[*+0:3][*+0:4])')
crf8_2 = AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0:1]=[*+0:2][*+0:3]=[*+0:4][*+0:5]=[*+0:6]>>([*+0:0]=[*+0:1][*+0:2]=[*+0:3][*+0:4]=[*+0:5][*+0:6])')
crf9_0 = AllChem.ReactionFromSmarts('[*:0]@[C:1](=[O:2])@[*:3]>>([*:0][*:3].[C-:1]#[O+:2])')
crf11_0= AllChem.ReactionFromSmarts('[*+0;!H0:0][*+0;!H0:1]>>([*+0:0]=[*+0:1])')
crf11_1= AllChem.ReactionFromSmarts('[*+0;!H0:0]=[*+0;!H0:1]>>([*+0:0]#[*+0:1])')
crf12_0= AllChem.ReactionFromSmarts('[*+0:0][*+0:1][*+0:2][*+0:3]>>([*+0:0][*+0:3].[*+0:1]=[*+0:2])')
crf12_1= AllChem.ReactionFromSmarts('[*+0:0][*+0:1]=[*+0:2][*+0:3]>>([*+0:0][*+0:3].[*+0:1]#[*+0:2])')
crf13_0= AllChem.ReactionFromSmarts('[*+0:0][C+0:1](=[O+0:2])[OH+0:3]>>([*+0:0].[O+0:2]=[C+0:1]=[O+0:3])')
crf13_1= AllChem.ReactionFromSmarts('[C+0;H1:0](=[O+0:1])[O+0:2][*+0:3]>>([O+0:1]=[C+0:0]=[O+0:2].[*+0:3])')
crf13_2= AllChem.ReactionFromSmarts('[*+0:0][C+0:1](=[O+0:2])[O+0:3][*+0:4]>>([O+0:2]=[C+0:1]=[O+0:3].[*+0:0][*+0:4])')
cmf2_0 = AllChem.ReactionFromSmarts('[N+0,O+0,S+0:0][*+0:1][*:2]>>([N+1,O+1,S+1:0]=[*+0:1].[*+0:2])')
cmf2_1 = AllChem.ReactionFromSmarts('[N+0,O+0,S+0:0]=[*+0:1][*:2]>>([N+1,O+1,S+1:0]#[*+0:1].[*+0:2])')
cmf7_0 = AllChem.ReactionFromSmarts('[*+0:0][*+0:1]>>([*+1:0].[*-1:1])')
cmf13_0= AllChem.ReactionFromSmarts('[*+0:0]1=[*+0:1][*+0:2]=[*+0:3][*+0:4]=[*+0:5]1>>([*+0:0]1=[*+0:1][*+0:2]=[*+0:3][*+1:4]1.[*:5])')
########################################## fragmentation reaction
# Create a list to save reactions
reactions = [crf1_0,crf1_1,crf2_0,crf2_1,crf3_0,crf3_1,crf4_0,crf5_0,crf6_0,crf7_0,crf7_1,crf7_2,crf7_3,crf8_0,crf8_1,crf8_2,crf9_0,
crf11_0,crf11_1,crf12_0,crf12_1,crf13_0,crf13_1,crf13_2,cmf2_0,cmf2_1,cmf7_0,cmf13_0]
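# Naming note (our reading of the SMARTS above, not documented in the source):
# 'crf*' rules cleave bonds while keeping both fragments neutral
# (charge-retention style), whereas 'cmf*' rules produce charged fragments
# (charge-migration style), e.g. cmf7_0 splits a single bond into a
# [+1]/[-1] pair.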
def remove_extra_Hs(mol):
for atom in mol.GetAtoms():
bonds = atom.GetBonds() # Get all bonds of an atom
bonds_prop = [ithBond.GetBondTypeAsDouble() for ithBond in bonds] # Get all bond type as a list
defaultValence = Chem.GetPeriodicTable().GetDefaultValence(atom.GetSymbol()) # Get the default valence of atom
numExplicitHs = atom.GetNumExplicitHs() # Get the number of explicit Hs
numFormalCharge = atom.GetFormalCharge() # Get formal charge
actualValence = int(sum(bonds_prop) + numExplicitHs - numFormalCharge) # use bonds, explicit Hs, formal charge to calculate actual valence
if actualValence > defaultValence:
if numExplicitHs-(actualValence-defaultValence) >= 0:
atom.SetNumExplicitHs(numExplicitHs-(actualValence-defaultValence))
return mol
def make_rxns(rxn, precusor_smiles):
precusor_mol = Chem.MolFromSmiles(precusor_smiles) # get precusor mol
Chem.Kekulize(precusor_mol, clearAromaticFlags=True) # kekulize molecule
for atom in precusor_mol.GetAtoms():
if atom.HasProp('molAtomMapNumber') != 1:
atom.SetIntProp('molAtomMapNumber', atom.GetIdx())
atom.SetIntProp('originalAtomMapNumber', atom.GetAtomMapNum())
product_set1 = rxn.RunReactants([precusor_mol])
precusor_precusor_reaction = AllChem.ChemicalReaction()
precusor_precusor_reaction.AddReactantTemplate(precusor_mol)
precusor_precusor_reaction.AddProductTemplate(precusor_mol)
precusor_precusor_reaction_smiles = AllChem.ReactionToSmiles(precusor_precusor_reaction)
if len(product_set1) == 0: return [precusor_precusor_reaction_smiles]
atomIdxs = [atom.GetIdx() for atom in precusor_mol.GetAtoms()]
atomMaps = [int(atom.GetProp('originalAtomMapNumber')) for atom in precusor_mol.GetAtoms()]
atomIdxs_Maps = dict(zip(atomIdxs, atomMaps))
new_rxns = []
for pset in product_set1:
for prod in pset:
prod = remove_extra_Hs(prod)
new_rxn = AllChem.ChemicalReaction()
new_rxn.AddReactantTemplate(precusor_mol)
for a in prod.GetAtoms():
if not a.GetSymbol() == '*':
a.SetIntProp('molAtomMapNumber', atomIdxs_Maps[int(a.GetProp('react_atom_idx'))])
a.SetIntProp('originalAtomMapNumber', atomIdxs_Maps[int(a.GetProp('react_atom_idx'))])
new_rxn.AddProductTemplate(prod)
new_rxn = AllChem.ReactionToSmiles(new_rxn)
new_rxns.append(new_rxn)
return [precusor_precusor_reaction_smiles] + new_rxns
def batch_fragmentation(precusor_smiles, reactions):
reaction_smiles = [make_rxns(rxn = ithReaction, precusor_smiles = precusor_smiles) for ithReaction in reactions]
result = list({*chain.from_iterable(i for i in reaction_smiles)})
return result
def two_steps_fragmentation(precusor_smiles, reactions):
print(precusor_smiles)
reaction_smiles_step1 = batch_fragmentation(precusor_smiles = precusor_smiles, reactions = reactions)
if len(reaction_smiles_step1) > 0:
product_smiles_step1 = [i.split('>>')[1][1:-1] if '.' in i else i.split('>>')[1] for i in reaction_smiles_step1]
product_smiles_step1 = [i.split('.') for i in product_smiles_step1]
product_smiles_step1 = [*chain.from_iterable(i for i in product_smiles_step1)]
product_mol_step1 = [Chem.MolFromSmiles(i) for i in product_smiles_step1]
product_inchikey_step1 = [Chem.MolToInchiKey(i) if i is not None else None for i in product_mol_step1]
step1_df = pd.DataFrame({'smiles':product_smiles_step1,
'inchikey':product_inchikey_step1})
step1_df.dropna(subset=['smiles', 'inchikey'], inplace=True)
unique_step1_df = step1_df.drop_duplicates(subset=['inchikey'])
unique_product_smiles_step1 = unique_step1_df['smiles'].tolist()
reaction_smiles_step2 = [batch_fragmentation(precusor_smiles = i, reactions = reactions) for i in unique_product_smiles_step1]
reaction_smiles_step2 = [*chain.from_iterable(x for x in reaction_smiles_step2)]
if len(reaction_smiles_step2) > 0:
product_smiles_step2 = [i.split('>>')[1][1:-1] if '.' in i else i.split('>>')[1] for i in reaction_smiles_step2]
product_smiles_step2 = [i.split('.') for i in product_smiles_step2]
product_smiles_step2 = [*chain.from_iterable(i for i in product_smiles_step2)]
mol_step2 = [Chem.MolFromSmiles(i) for i in product_smiles_step2]
atomMap = [tuple(sorted([atom.GetAtomMapNum() for atom in i.GetAtoms()])) if i is not None else tuple([]) for i in mol_step2]
ExactMolWt = [Descriptors.ExactMolWt(i) if i is not None else -100 for i in mol_step2]
NumRadicalElectrons = [Descriptors.NumRadicalElectrons(i) if i is not None else 0 for i in mol_step2]
NumValenceElectrons = [Descriptors.NumValenceElectrons(i) if i is not None else 0 for i in mol_step2]
step2_df = pd.DataFrame({'atomMap':atomMap,
'ExactMolWt':ExactMolWt,
'NumRadicalElectrons':NumRadicalElectrons,
'NumValenceElectrons':NumValenceElectrons,
'product_smiles_step2': product_smiles_step2})
unique_step2_df = step2_df.drop_duplicates(subset=['atomMap', 'ExactMolWt', 'NumRadicalElectrons', 'NumValenceElectrons'])
unique_product_smiles_step2 = unique_step2_df['product_smiles_step2'].tolist()
return reaction_smiles_step1[0].split('>>')[0], [reaction_smiles_step1[0].split('>>')[0]] + unique_product_smiles_step2
else:
return reaction_smiles_step1[0].split('>>')[0], [reaction_smiles_step1[0].split('>>')[0]]
def three_steps_fragmentation(precusor_smiles, reactions):
reaction_smiles_step1 = batch_fragmentation(precusor_smiles = precusor_smiles, reactions = reactions)
product_smiles_step1 = [i.split('>>')[1][1:-1] if '.' in i else i.split('>>')[1] for i in reaction_smiles_step1]
product_smiles_step1 = [i.split('.') for i in product_smiles_step1]
product_smiles_step1 = [*chain.from_iterable(i for i in product_smiles_step1)]
product_smiles_step1 = list(set(product_smiles_step1))
reaction_smiles_step2 = [batch_fragmentation(precusor_smiles = i, reactions = reactions) for i in product_smiles_step1]
reaction_smiles_step2 = [*chain.from_iterable(x for x in reaction_smiles_step2)]
product_smiles_step2 = [i.split('>>')[1][1:-1] if '.' in i else i.split('>>')[1] for i in reaction_smiles_step2]
product_smiles_step2 = [i.split('.') for i in product_smiles_step2]
product_smiles_step2 = [*chain.from_iterable(i for i in product_smiles_step2)]
product_smiles_step2 = list(set(product_smiles_step2))
reaction_smiles_step3 = [batch_fragmentation(precusor_smiles = i, reactions = reactions) for i in product_smiles_step2]
reaction_smiles_step3 = [*chain.from_iterable(x for x in reaction_smiles_step3)]
product_smiles_step3 = [i.split('>>')[1][1:-1] if '.' in i else i.split('>>')[1] for i in reaction_smiles_step3]
product_smiles_step3 = [i.split('.') for i in product_smiles_step3]
product_smiles_step3 = [*chain.from_iterable(i for i in product_smiles_step3)]
product_smiles_step3 = list(set(product_smiles_step3))
mol_final = [Chem.MolFromSmiles(i) for i in product_smiles_step3]
atomMap_final = [tuple(sorted([atom.GetAtomMapNum() for atom in i.GetAtoms()])) if i is not None else tuple([]) for i in mol_final]
ExactMolWt_final = [Descriptors.ExactMolWt(i) if i is not None else -100 for i in mol_final]
NumRadicalElectrons_final = [Descriptors.NumRadicalElectrons(i) if i is not None else 0 for i in mol_final]
NumValenceElectrons_final = [Descriptors.NumValenceElectrons(i) if i is not None else 0 for i in mol_final]
df_final = pd.DataFrame({'atomMap':atomMap_final,
'ExactMolWt':ExactMolWt_final,
'NumRadicalElectrons':NumRadicalElectrons_final,
'NumValenceElectrons':NumValenceElectrons_final,
'product_smiles': product_smiles_step3})
unique_df_final = df_final.drop_duplicates(subset=['atomMap', 'ExactMolWt', 'NumRadicalElectrons', 'NumValenceElectrons'])
unique_product_smiles_final = unique_df_final['product_smiles'].tolist()
return reaction_smiles_step1[0].split('>>')[0], unique_product_smiles_final
def batch_smiles_fragmentation(rawCsvFilePath, step1FilePath, reactions, steps, n_jobs = -1):
if not os.path.exists(step1FilePath): os.makedirs(step1FilePath)
rawCsvData = pd.read_csv(rawCsvFilePath, encoding=u'gbk')
rawCsvData.drop_duplicates(subset=['inchikey'], keep='first', inplace=True)
a = rawCsvData['inchikey'].values.tolist()
b = [_.replace('.pkl','') for _ in os.listdir(step1FilePath)]
c1 = list(set(a)- set(b))
csvData = rawCsvData[rawCsvData['inchikey'].isin(c1)]
if steps == 2:
def my_task(i):
inchikey_file_name = str(csvData['inchikey'].values[i]) + '.pkl'
if inchikey_file_name not in os.listdir(step1FilePath): # If ith item is not processed
mol = Chem.MolFromSmiles(csvData['smiles'].values[i])
                if mol is None:
return 'none'
if Chem.rdmolops.GetFormalCharge(mol) != 0:
return 'none'
if Descriptors.ExactMolWt(mol) > 1000:
return '> 1000 Da'
precusor_maped_smiles, product_maped_smiles = two_steps_fragmentation(precusor_smiles = csvData['smiles'].values[i], reactions=reactions)
save_file = open(step1FilePath + '/' + str(csvData['inchikey'].values[i]) + '.pkl',"wb")
pickle.dump([precusor_maped_smiles,product_maped_smiles], save_file)
save_file.close()
return i
else:
return 'done'
elif steps == 3:
def my_task(i):
inchikey_file_name = str(csvData['inchikey'].values[i]) + '.pkl'
if inchikey_file_name not in os.listdir(step1FilePath): # If ith item is not processed
mol = Chem.MolFromSmiles(csvData['smiles'].values[i])
                if mol is None:
return 'none'
if Chem.rdmolops.GetFormalCharge(mol) != 0:
return 'none'
if Descriptors.ExactMolWt(mol) > 1000:
return '> 1000 Da'
precusor_maped_smiles, product_maped_smiles = three_steps_fragmentation(precusor_smiles = csvData['smiles'].values[i], reactions=reactions)
save_file = open(step1FilePath + '/' + str(csvData['inchikey'].values[i]) + '.pkl',"wb")
                pickle.dump([precusor_maped_smiles,product_maped_smiles], save_file) # save the variables in order
save_file.close()
return i
else:
return 'done'
else:
return('error')
lst1 = list(range(len(csvData)))
Parallel(n_jobs = n_jobs, verbose = 1)(delayed(my_task)(i) for i in tqdm(lst1)) | Python |
3D | Parmeggiani-Lab/elfin | theories_and_assumptions.md | .md | 5,111 | 59 | # elfin theories and assumptions
[D] is a definition
[T] is a given truth or an unproven theory derived from given truths
[A] is an assumption we use to bound the problem that elfin attempts to solve
[S] is a speculation
## Protein Database
- [D] A "module" is a protein that has two or more uncapped termini (interfaces)
- [T] Modules are slightly flexible
- [A] Each module "pose" can be treated as a rigid snapshot
- [T] In general modules are of different sizes
- [T] In general modules have different termini transformations (delta transformation between two termini)
- [T] Termini are either type N or type C
- [T] Modules may be single- or multi-chain
- [D] A "cap" is a one-terminus protein that can close up a dangling interface
- [D] A "module sequence" consists of multiple modules that are connected to one another via valid interfaces
- [D] A "hub" is a multi-chain module that can join different module sequences
- [D] A "symmetric" hub must have identical module sequences stemming from each of its termini
- [D] An "asymmetric" hub is a hub without the symmetric hub restriction
## Algorithm
- [T] Module interface connectivity can be modelled as an incomplete directed cyclic graph with an uneven distribution of node degrees
- [D] A "module sequence" is a valid walk in the connectivity graph
- [D] A "network" is a collection of module sequences joined by hubs
- [A] Two networks can only be separate networks if they cannot or should not be joined together
- [A] An elfin design output is a collection of networks
- [A] Elfin attempts to solve a general shape optimization problem: build a structure that fits a desired shape defined as a 3D guiding graph using smaller modules as components
- [T] The user does not know beforehand the optimal module choice. It follows that elfin must insert or remove nodes where necessary so modules can actually connect together. Hence a node in the guiding graph does not necessarily correspond to one module in the solution
- [T] Since elfin may need to insert or remove nodes, the solution size becomes another search parameter
- [T] Since modules may not be able to satisfy certain shapes, there may or may not exist a perfect solution
- [T] In the extremely rare special case where there is an exact optimal solution, it can be checked in O(n) time if the solution is size n (e.g. an artificial case where each node in the guiding path sits exactly on its corresponding optimal module COM)
- [T] In the general case, given one input guiding graph and one solution, checking optimality boils down to searching the solution space to see if a better solution exists
- [T] Searching for the optimally fitting module sequence in the connectivity graph is a special case of the Precedence Constrained [Knapsack Problem](https://en.wikipedia.org/wiki/Knapsack_problem) (PCKP).
- [T] In addition to PCKP, elfin's problem is unbounded (nodes are repeatable) and immediate-precedence (i.e. the last picked node has an arc to the current choice, as opposed to any-one-precedence and all-precedence).
- [S] Could not confirm complexity lower bound
- [T] Solution space grows exponentially with solution size (from my thesis)
- [T] Many Knapsack problems can be solved with dynamic programming, but that relies on the problem being a 0-1 Knapsack problem.
- [S] Elfin is not a 0-1 Knapsack problem because each solution might visit a node multiple times, and the order of visits matters. This seems to hint that dynamic programming cannot help elfin solve its problem
## Beyond the general guiding graph
On top of using a guiding graph as input, we want to allow the user to manually specify parts of the solution. This comes in the following forms:
1. Placing specific modules in 3D space
2. Connecting specific modules to path guides
3. Specifying spatial tolerance between a module and a path guide
- [D] A "path guide" is a pure guiding graph that may or may not connect to user-defined modules (in a possibly hybrid elfin problem statement)
Numbers 1 and 2 help reduce the search space by reducing the solution size and also by restricting interface choices. However, number 3 is a bit less obvious. At first look, it seems that spatial tolerances create a volume in which hinges may reside and angle "fans" in which hinges could face, thus requiring infinitely(!) more pose checks.
- [D] A "hinge" is a user-specified module that connects to a path guide
Since we simply can't afford to search infinitely more poses, we could try to discretize the tolerance volume/angle fans. However, that requires a granularity parameter, and still increases the search space by at least an order of magnitude.
A better way is to simply evaluate a candidate based on the hinge node emitting a zero score if it satisfies the spatial tolerance. If there is no spatial tolerance, the tolerant volume is just a single point and the tolerant angle fan a single vector. This adds a constant amount of work in evaluation, but doesn't require any increase in search space.
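
A minimal sketch of this evaluation idea in Python (hypothetical names and signatures, not elfin's actual API):

```python
import numpy as np

def hinge_score(hinge_pos, hinge_dir, guide_pos, guide_dir,
                tol_radius=0.0, tol_angle_rad=0.0):
    """Returns 0 when the hinge satisfies the spatial tolerance, otherwise
    the amount by which it violates the tolerant volume/angle fan."""
    # positional violation: distance beyond the tolerant sphere's radius
    dist_err = max(0.0, float(np.linalg.norm(np.subtract(hinge_pos, guide_pos))) - tol_radius)
    # angular violation: angle beyond the tolerant fan's half-angle
    cos_ang = np.dot(hinge_dir, guide_dir) / (
        np.linalg.norm(hinge_dir) * np.linalg.norm(guide_dir))
    ang_err = max(0.0, float(np.arccos(np.clip(cos_ang, -1.0, 1.0))) - tol_angle_rad)
    return dist_err + ang_err
```

With both tolerances at zero this reduces to exact-pose checking, and the score stays continuous near the tolerance boundary, which is friendly to stochastic optimizers. | Markdown |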
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/extrude_single_at_single_n.py | .py | 2,355 | 73 | #!/usr/bin/env python3
#
# A PyMol extension script to test extrusion of a single module from a single
# module's n-term
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
from elfinpy import utilities
import numpy as np
import os
@cmd.extend
def extrude_single_at_single_n(single_name=None, ext_single_name=None):
"""Extrudes a single at the n-terminus of a single module.
Args:
- single_name - string name of the fixed single
- ext_single_name - string name of the extension single
"""
if single_name is None or \
ext_single_name is None:
print(extrude_single_at_single_n.__doc__)
else:
double_name = '-'.join([ext_single_name, single_name])
pdb_dir = os.getcwd() + '/../../resources/pdb_aligned/'
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double')
cmd.load(pdb_dir + '/singles/' + single_name + '.pdb')
cmd.set_name(single_name, 'single')
cmd.load(pdb_dir + '/singles/' + ext_single_name + '.pdb')
cmd.set_name(ext_single_name, 'single-ext')
xdb=utilities.read_json(os.getcwd() + '/../../resources/xdb.json')
double_info = xdb['double_data'][ext_single_name][single_name]
# first, drop the double (into its A frame) for reference
tx('double', rot=double_info['rot'], tran_after=double_info['tran'])
# extrude N term - drop into double's A frame
tx('single-ext', rot=double_info['rot'], tran_after=double_info['tran'])
cmd.disable('single-*')
cmd.enable('single-ext')
noclip()
print('Extruded Single {} at Single {}\'s N-Term'.\
format(ext_single_name, single_name))
@cmd.extend
def extrude_single_at_single_n_example():
extrude_single_at_single_n(single_name='D79_j1_D54', ext_single_name='D79')
print('Extrude Single At Single N Loaded')
| Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/extension_template.py | .py | 403 | 24 | #!/usr/bin/env python3
#
# A PyMol extension script template
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
print('Template Extension Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/draw_lines.py | .py | 5,465 | 185 | #!/usr/bin/env python3
#
# A PyMol extension script for drawing lines.
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
import pymol.cgo # constants
from pymol import cmd
from pymol.vfont import plain
import numpy as np
    import re
    import json
class LineCounter:
"""A counter to keep track of how many lines are drawn and to be used in naming
the next line.
"""
def __init__(self):
self.count = 1
def add(self):
self.count += 1
def get(self):
return self.count
counter = LineCounter()
@cmd.extend
def draw_line(starting_point=None, line_vector=None, color=(1,1,1), width=1.0, label='', font_size=12):
"""Draws a line.
Args:
- starting_point - 3-value list or tuple (x,y,z)
        - line_vector - 3-value list or tuple (i,j,k); interpreted as the absolute end point of the line
- color - 3-value list or tuple (r,g,b)
- width - float
        - label - string
- font_size - float
"""
if starting_point is None or len(starting_point) != 3 \
or line_vector is None or len(line_vector) != 3:
print(draw_line.__doc__)
else :
x,y,z = starting_point
i,j,k = line_vector
r,g,b = color
            d = width * 2.5 # radius of the spheres drawn at each end of the line
obj = [
pymol.cgo.COLOR, r,g,b,
pymol.cgo.SPHERE, x,y,z, d,
pymol.cgo.CYLINDER, x,y,z, i,j,k, width, r,g,b, r,g,b,
pymol.cgo.SPHERE, i,j,k, d,
]
            # add a text label near the line's end point
font_thickness = font_size / 10
pymol.cgo.cyl_text(obj,plain,[i+5,j+5,k],label,
font_thickness,axes=[[font_size,0,0],[0,font_size,0],[0,0,font_size]])
cmd.load_cgo(obj,'line'+str(counter.get()))
counter.add()
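    # Usage sketch: a red line of width 2 from the origin to the end point
    # (10, 0, 0), labelled 'demo' (values are illustrative):
    #
    #   draw_line(starting_point=(0,0,0), line_vector=(10,0,0),
    #             color=(1,0,0), width=2.0, label='demo')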
@cmd.extend
def draw_points(points=[], scale=1.0, width=3.0, color=(0,0,0)):
"""Draws points and joins them up using colored lines.
Args:
- points - list of 3-value lists
- scale - float
- width - float
- color - 3-value list or tuple
"""
points = np.asarray(points) * scale
r,g,b = color
        for (p1, p2) in list(zip(points, np.roll(points, -1, axis=0)))[0:-1]: # list() needed: zip objects aren't sliceable in Python 3
draw_line(p1, p2, width=width, color=(r,g,b))
@cmd.extend
def draw_csv(spec_file=None, scale=1.0, width=2.0, centered=False, shift=None):
"""Draws points specified by a csv.
Args:
- spec_file - string path
- scale - float
- width - float
- centered - boolean
- shift - 3-value list or tuple
"""
if spec_file is None:
print(draw_csv.__doc__)
else:
with open(spec_file, 'r') as file:
pts = np.asarray([[float(n) for n in re.split(', *| *', l.strip())] for l in file.read().split('\n') if len(l) > 0])
if centered:
pts = pts - pts[-1]
dists = [np.linalg.norm(p-[0,0,0]) for p in pts]
if shift is not None:
pts += np.asarray(shift) * np.mean(dists)
draw_points(pts, scale=scale, width=width)
cmd.reset()
cmd.set("depth_cue", 0)
@cmd.extend
def draw_json(spec_file, scale=1.0, width=2.0, centered=False, shift=None):
"""Draws points specified by a json.
Args:
- spec_file - string path
- scale - float
- width - float
- centered - boolean
- shift - 3-value list or tuple
"""
if spec_file is None:
            print(draw_json.__doc__)
else:
with open(spec_file, 'r') as file:
pts = np.asarray(json.load(file)['coms'])
if centered:
pts = pts - pts[-1]
dists = [np.linalg.norm(p-[0,0,0]) for p in pts]
if shift is not None:
pts += np.asarray(shift) * np.mean(dists)
draw_points(pts, scale=scale, width=width)
cmd.reset()
cmd.set("depth_cue", 0)
@cmd.extend
def draw_axes(length=500, width=2, font_size=20):
"""Draws the XYZ axes."""
draw_line(
starting_point=(-length,0,0),
line_vector=(length,0,0),
color=(1,0,0),
width=width,
label='X',
font_size=font_size);
draw_line(
starting_point=(0,-length,0),
line_vector=(0,length,0),
color=(0,1,0),
width=width,
label='Y',
font_size=font_size);
draw_line(
starting_point=(0,0,-length),
line_vector=(0,0,length),
color=(0,0,1),
width=width,
label='Z',
font_size=font_size);
cmd.reset()
cmd.set("depth_cue", 0)
@cmd.extend
def noclip():
"""Sets clipping to nearly infinity."""
cmd.clip('near', 99999999)
cmd.clip('far', -99999999)
draw_axes()
noclip()
print('Line Utils Extension Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/extrude_hub_at_single_n.py | .py | 2,736 | 82 | #!/usr/bin/env python3
#
# A PyMol extension script to test extrusion of a hub from a single module's
# n-term
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
from elfinpy import utilities
import numpy as np
import os
@cmd.extend
def extrude_hub_at_single_n(single_name=None, hub_name=None, component_id=None):
"""Extrudes a hub at the n-terminus of a single module.
Args:
- single_name - string
- hub_name - string
- component_id - string id of the module component inside the hub to
extend into
"""
if single_name is None or \
hub_name is None or \
component_id is None:
print(extrude_hub_at_single_n.__doc__)
else:
double_name = '-'.join([single_name, single_name])
pdb_dir = os.getcwd() + '/../../resources/pdb_aligned/'
cmd.load(pdb_dir + '/singles/' + single_name + '.pdb')
cmd.set_name(single_name, 'single')
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double')
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double-o')
cmd.load(pdb_dir + '/hubs/' + hub_name + '.pdb')
cmd.set_name(hub_name, 'hub')
xdb=utilities.read_json(os.getcwd() + '/../../resources/xdb.json')
double_info = xdb['double_data'][single_name][single_name]
# first, drop the double for reference
tx('double', rot=double_info['rot'], tran_after=double_info['tran'])
hub_comp_info = xdb['hub_data'][hub_name]['component_info']
comp_a_cc = hub_comp_info[component_id]['c_connections'][single_name]
# The frame drop order is important here
# drop hub into component frame
tx('hub', rot=comp_a_cc['rot'], tran_after=comp_a_cc['tran'])
# drop hub into double's frame
tx('hub', rot=double_info['rot'], tran_after=double_info['tran'])
noclip()
print('Extruded Hub {} Component {} at Single {}\'s N-Term'.\
format(hub_name, component_id, single_name))
@cmd.extend
def extrude_hub_at_single_n_example():
extrude_hub_at_single_n(single_name='D79', hub_name='D79_aC2_04', component_id='B')
print('Extrude Hub At Single N Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/__init__.py | .py | 0 | 0 | null | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/load_all_extensions.py | .py | 870 | 39 | #!/usr/bin/env python2
#
# This is a PyMol extension script to load all PyMol extensions in the same
# directory.
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
import sys, os, glob
curr_dir = os.getcwd()
sys.path.append(os.path.join(curr_dir, os.pardir, os.pardir)) # for elfinpy
import importlib
ext_exclusion = ['__init__.py', 'load_all_extensions.py']
extensions = [
py for py in glob.glob(os.getcwd() + '/*.py') \
if os.path.basename(py) not in ext_exclusion \
]
for ext in extensions:
cmd.load(ext)
print('All Extensions Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/extrude_hub_at_single_c.py | .py | 2,172 | 68 | #!/usr/bin/env python3
#
# A PyMol extension script to test extrusion of a hub from a single module's
# c-term
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
from elfinpy import utilities
import os
@cmd.extend
def extrude_hub_at_single_c(single_name=None, hub_name=None, component_id=None):
"""Extrudes a hub at the c-terminus of a single module.
Args:
- single_name - string
- hub_name - string
- component_id - string, indicating which module component inside the hub
to extend into
"""
if single_name is None or \
hub_name is None or \
component_id is None:
print(extrude_hub_at_single_c.__doc__)
else:
double_name = '-'.join([single_name, single_name])
pdb_dir = os.getcwd() + '/../../resources/pdb_aligned/'
cmd.load(pdb_dir + '/singles/' + single_name + '.pdb')
cmd.set_name(single_name, 'single')
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double')
cmd.load(pdb_dir + '/hubs/' + hub_name + '.pdb')
cmd.set_name(hub_name, 'hub')
xdb=utilities.read_json(os.getcwd() + '/../../resources/xdb.json')
hub_comp_info = xdb['hub_data'][hub_name]['component_info']
comp_a_cc = hub_comp_info[component_id]['n_connections'][single_name]
tx('hub', rot=comp_a_cc['rot'], tran_after=comp_a_cc['tran'])
noclip()
print('Extruded Hub {} Component {} at Single {}\'s C-Term'.\
format(hub_name, component_id, single_name))
@cmd.extend
def extrude_hub_at_single_c_example():
extrude_hub_at_single_c(single_name='D4', hub_name='D4_C3_02', component_id='B')
print('Extrude Hub At Single Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/transform_helper.py | .py | 1,855 | 65 | #!/usr/bin/env python3
#
# A PyMol extension script to shorten the transform_selection() command and
# accept python lists as argument.
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
import numpy as np
@cmd.extend
def tx(
obj_name=None,
tran_before=[0,0,0],
rot=[[1,0,0],[0,1,0],[0,0,1]],
tran_after=[0,0,0]
):
"""Transforms an object.
Args:
- obj_name - string
- tran_before - a 3x1 translation vector applied before rotation
- rot - a 3x3 rotation matrix
- tran_after - a 3x1 translation vector applied after rotation
"""
if obj_name is None:
print(tx.__doc__)
else:
rot_tran_mat = np.array(rot)
rot_tran_mat = np.append(rot_tran_mat, np.transpose([tran_after]), axis=1)
rot_tran_mat = np.append(rot_tran_mat, [tran_before + [1]], axis=0)
pymol_rot_tran_vals = [v for row in rot_tran_mat for v in row]
cmd.transform_selection(obj_name, matrix=pymol_rot_tran_vals, homogenous=0)
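    # Usage sketch: rotate an object 90 degrees about Z, then translate it
    # +10 along X (object name and values are illustrative):
    #
    #   tx('my_obj', rot=[[0,-1,0],[1,0,0],[0,0,1]], tran_after=[10,0,0])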
@cmd.extend
def multi_tx(obj_name=None, rottran_list=[]):
"""Transforms an object over a list of rot, tran tuples.
Args:
- obj_name - string
- rottran_list - a list of (rot, tran) lists or tuples
"""
if obj_name is None:
print(multi_tx.__doc__)
else:
for rot, tran in rottran_list:
                tx(obj_name, rot=rot, tran_after=tran)  # 'etc' was undefined; assuming each tuple is (rot, tran_after)
print('Transform Helper Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/extrude_single_at_single_c.py | .py | 2,269 | 74 | #!/usr/bin/env python3
#
# A PyMol extension script to test extrusion of a single module from a single
# module's c-term
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
from elfinpy import utilities
import numpy as np
import os
@cmd.extend
def extrude_single_at_single_c(single_name=None, ext_single_name=None):
"""Extrudes a single at the c-terminus of a single module.
Args:
- single_name - string name of the fixed single
- ext_single_name - string name of the extension single
"""
if single_name is None or \
ext_single_name is None:
print(extrude_single_at_single_c.__doc__)
else:
double_name = '-'.join([single_name, ext_single_name])
pdb_dir = os.getcwd() + '/../../resources/pdb_aligned/'
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double')
cmd.load(pdb_dir + '/singles/' + single_name + '.pdb')
cmd.set_name(single_name, 'single')
cmd.load(pdb_dir + '/singles/' + ext_single_name + '.pdb')
cmd.set_name(ext_single_name, 'single-ext')
xdb=utilities.read_json(os.getcwd() + '/../../resources/xdb.json')
double_info = xdb['double_data'][single_name][ext_single_name]
            # extrude at the C-term: apply the inverse of the double's
            # transform (transposed rotation, negated translation)
tx(
'single-ext',
rot=np.array(double_info['rot']).transpose().tolist(),
tran_before=[-t for t in double_info['tran']]
)
cmd.disable('single-*')
cmd.enable('single-ext')
noclip()
            print('Extruded Single {} at Single {}\'s C-Term'.\
                format(ext_single_name, single_name))
@cmd.extend
def extrude_single_at_single_c_example():
extrude_single_at_single_c(single_name='D79', ext_single_name='D79_j1_D54')
print('Extrude Single At Single C Loaded')
| Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/batch_convert.py | .py | 2,221 | 76 | #!/usr/bin/env python3
#
# A PyMol extension script for batch converting objects (originally intended
# to convert into .obj models).
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
    import glob
    import re
from elfinpy.utilities import make_dir
@cmd.extend
def batch_convert_modules(src_dir=None, dst_dir=None, ext='obj'):
"""Batch convert Elfin protein module PDBs.
Args:
- src_dir - input PDB directory (one that contains sub_dirs like singles,
doubles, hubs)
- dst_dir - output PDB directory
- ext - file extension supported by PyMol
"""
if src_dir is None or dst_dir is None:
print(batch_convert_modules.__doc__)
else:
# Clear workspace
cmd.reinitialize()
sub_dirs = ['singles', 'doubles', 'hubs'] # don't think we need cappings
for sd in sub_dirs:
make_dir(dst_dir + '/' + sd)
files = [f for flist in [glob.glob(src_dir + '/{}/*.pdb'.format(sd)) for sd in sub_dirs] for f in flist]
cmd.set('auto_show_nonbonded', 'off')
cmd.set('auto_show_selections', 'off')
cmd.set('auto_show_spheres', 'off')
cmd.set('auto_show_classified', 'off')
cmd.set('auto_show_lines', 'off')
fn_info = []
for f in files:
cmd.load(f)
name = '.'.join((re.split(r'/|\\', f)[-1]).split('.')[:-1])
fn_info.append((f, name))
cmd.disable('all')
cmd.show('cartoon')
for (file_path, module_name) in fn_info:
cmd.enable(module_name)
cmd.save(file_path.replace(src_dir, dst_dir).replace('.pdb', '.' + ext))
cmd.disable(module_name)
# Clear workspace
cmd.delete('all')
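# Example (illustrative paths): export every module as a .obj model:
#   batch_convert_modules('./resources/pdb_aligned', './obj_out', ext='obj')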
print('Batch Convert Loaded') | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/deprecated/color_by_restype.py | .py | 2,636 | 117 | # Copyright (c) 2004 Robert L. Campbell
import colorsys,sys
from pymol import cmd
aa_1_3 = {
'A': 'ALA',
'C': 'CYS',
'D': 'ASP',
'E': 'GLU',
'F': 'PHE',
'G': 'GLY',
'H': 'HIS',
'I': 'ILE',
'K': 'LYS',
'L': 'LEU',
'M': 'MET',
'N': 'ASN',
'P': 'PRO',
'Q': 'GLN',
'R': 'ARG',
'S': 'SER',
'T': 'THR',
'V': 'VAL',
'W': 'TRP',
'Y': 'TYR',
}
aa_3_1 = {
'ALA' : 'A',
'CYS' : 'C',
'ASP' : 'D',
'GLU' : 'E',
'PHE' : 'F',
'GLY' : 'G',
'HIS' : 'H',
'ILE' : 'I',
'LYS' : 'K',
'LEU' : 'L',
'MET' : 'M',
'ASN' : 'N',
'PRO' : 'P',
'GLN' : 'Q',
'ARG' : 'R',
'SER' : 'S',
'THR' : 'T',
'VAL' : 'V',
'TRP' : 'W',
'TYR' : 'Y',
}
aa_types = {
'A': 'hydrophobic',
'C': 'cysteine',
'D': 'negative',
'E': 'negative',
'F': 'aromatic',
'G': 'glycine',
'H': 'polar',
'I': 'hydrophobic',
'K': 'positive',
'L': 'hydrophobic',
'M': 'hydrophobic',
'N': 'polar',
'P': 'proline',
'Q': 'polar',
'R': 'positive',
'S': 'polar',
'T': 'polar',
'V': 'hydrophobic',
'W': 'aromatic',
'Y': 'aromatic',
}
def color_by_restype(selection="all",
hydrophobic='grey90',
aromatic='lightpink',
polar='palecyan',
positive='blue',
negative='red',
cysteine='paleyellow',
proline='palegreen',
glycine='green',
):
"""
usage: color_by_restype <selection>, <optional overrides of default colors>
e.g. color_by_restype protein and chain A, hydrophobic=wheat
Residue groups: Default colours:
hydrophobic: AILMV grey90
aromatic: FWY lightpink
polar: HNQST palecyan
positive: KR blue
negative: DE red
cysteine: C paleyellow
proline: P palegreen
glycine: G green
"""
colors = {
'hydrophobic': hydrophobic,
'aromatic': aromatic,
'polar': polar,
'positive': positive,
'negative': negative,
'cysteine': cysteine,
'proline': proline,
'glycine': glycine,
}
for aa in aa_types:
sel = selection + " and r. %s" % aa_1_3[aa]
# print sel,"-->", colors[aa_types[aa]]
cmd.color(colors[aa_types[aa]],sel)
cmd.extend("color_by_restype",color_by_restype) | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/deprecated/center_of_mass.py | .py | 2,243 | 87 | """
See more here: http://www.pymolwiki.org/index.php/center_of_mass
DESCRIPTION
Places a pseudoatom at the center of mass
Author: Sean Law
Michigan State University
slaw (at) msu . edu
SEE ALSO
pseudoatom, get_com
"""
from __future__ import print_function
from pymol import cmd
def com(selection, state=None, mass=None, object=None, quiet=0, **kwargs):
quiet = int(quiet)
if (object == None):
try:
object = cmd.get_legal_name(selection)
object = cmd.get_unused_name(object + "_COM", 0)
except AttributeError:
object = 'COM'
cmd.delete(object)
if (state != None):
x, y, z = get_com(selection, mass=mass, quiet=quiet)
if not quiet:
print("[%f %f %f]" % (x, y, z))
cmd.pseudoatom(object, pos=[x, y, z], **kwargs)
cmd.show("spheres", object)
else:
for i in range(cmd.count_states()):
x, y, z = get_com(selection, mass=mass, state=i + 1, quiet=quiet)
if not quiet:
# print("State %d:%f %f %f" % (i + 1, x, y, z))
print("[%f, %f, %f]" % (i + 1, x, y, z))
cmd.pseudoatom(object, pos=[x, y, z], state=i + 1, **kwargs)
cmd.show("spheres", 'last ' + object)
cmd.extend("com", com)
def get_com(selection, state=1, mass=None, quiet=1):
"""
DESCRIPTION
Calculates the center of mass
Author: Sean Law
Michigan State University
slaw (at) msu . edu
"""
quiet = int(quiet)
totmass = 0.0
if mass != None and not quiet:
print("Calculating mass-weighted COM")
state = int(state)
model = cmd.get_model(selection, state)
x, y, z = 0, 0, 0
for a in model.atom:
if (mass != None):
m = a.get_mass()
x += a.coord[0] * m
y += a.coord[1] * m
z += a.coord[2] * m
totmass += m
else:
x += a.coord[0]
y += a.coord[1]
z += a.coord[2]
if (mass != None):
return [x / totmass, y / totmass, z / totmass]
else:
return [x / len(model.atom), y / len(model.atom), z / len(model.atom)]
cmd.extend("get_com", get_com)
# vi:expandtab:sw=3 | Python |
3D | Parmeggiani-Lab/elfin | pymol_scripts/extensions/deprecated/compare_solutions.py | .py | 1,798 | 67 | #!/usr/bin/env python3
#
# A PyMol extension script to compare Elfin solution against specification.
#
# *Needs to be re-implemented to deal with new spec and solution format.
#
from pymol import cmd
import json
import numpy as np
import elfinpy
from elfinpy import kabsch
def compare_solutions(spec_file=None, sol_csv_file=None):
"""
Compares solution center-of-mass points against the specification.
Args:
- spec_file - a csv or json file string path
- sol_csv_file - a csv file string path
"""
if spec_file is None or sol_csv_file is None:
print(compare_solutions.__doc__)
else:
if spec_file.rfind('.csv') != -1:
spec_pts = elfinpy.read_csv_points(spec_file)
elif spec_file.rfind('.json') != -1:
with open(spec_file, 'r') as file:
spec_pts = np.asarray(json.load(file)['coms'])
else:
print('Unknown spec file format')
return
sol_pts = elfinpy.read_csv_points(sol_csv_file)
# Centre both pts
centred_spec = spec_pts - np.mean(spec_pts, axis=0)
centred_sol = sol_pts - np.mean(sol_pts, axis=0)
# Draw specification
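# draw_pts is assumed to be provided by another elfin PyMOL extension
# loaded alongside this deprecated script.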
draw_pts(centred_spec, color=[0.7,0,0])
# Equalise sample points
specUpPts = elfinpy.upsample(centred_spec, centred_sol)
draw_pts(specUpPts, color=[0.5,0.5,0])
# Find Kabsch rotation for solution -> spec
R = kabsch.run_kabsch(centred_spec, specUpPts)
centredSpecR = np.dot(centred_spec, R)
draw_pts(centredSpecR, color=[0,0.5,0.7])
cmd.reset()
cmd.set("depth_cue", 0)
cmd.extend("compare_solutions", compare_solutions)
print('Compare Solutions Loaded')
def main():
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | misc/get_omp_version.cpp | .cpp | 307 | 11 | #include <unordered_map>
#include <string>
#include <cstdio>
#include <omp.h>
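// Build sketch (assumed invocation; adjust for your compiler):
//   g++ -fopenmp get_omp_version.cpp -o get_omp_version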
int main(int argc, char *argv[])
{
std::unordered_map<unsigned,std::string> map{
{200505,"2.5"},{200805,"3.0"},{201107,"3.1"},{201307,"4.0"},{201511,"4.5"}};
printf("Available OpenMP version: %s\n", map.at(_OPENMP).c_str());
return 0;
} | C++ |
3D | Parmeggiani-Lab/elfin | elfinpy/template.py | .py | 566 | 20 | #!/usr/bin/env python3
import argparse, sys
from utilities import *
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='Template Elfin Python script')
parser.add_argument('input') # Absence of dash denotes mandatory argument
return parser.parse_args(args)
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
# def main():
# raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/utilities.py | .py | 10,406 | 336 | """
Elfin data processing utilities module
"""
import inspect
import os
import sys
import code
import traceback as traceback_module
import json
import csv
import re
import numpy as np
RADII_TYPES = ['average_all', 'max_ca_dist', 'max_heavy_dist']
INF = float('inf')
TERM_TYPES = {'n', 'c'}
MOD_TYPES = {'single', 'hub'}
def check_mod_type(mod_type):
assert(mod_type.lower() in MOD_TYPES)
def check_term_type(term):
assert(term.lower() in TERM_TYPES)
def opposite_term(term):
return {'n':'c', 'c':'n'}.get(term.lower(), None)
def dict_diff(A, B):
"""Returns True if A and B differ, recursing into lists and dicts."""
if type(A) == list:
return any(dict_diff(a, b) for a, b in zip(A, B))
elif type(A) == dict:
return any(dict_diff(A[x], B[x]) for x in A if x in B)
else:
return A != B
# https://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
def to_dict(obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = to_dict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return to_dict(obj._ast())
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
return [to_dict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, to_dict(value, classkey))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
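# Example (illustrative): any nested object graph becomes JSON-serialisable:
#   class Point:
#       def __init__(self): self.x, self.y = 1, 2
#   to_dict(Point()) # -> {'x': 1, 'y': 2}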
def get_rotation(angle_x=0, angle_y=0, angle_z=0):
"""https://en.wikipedia.org/wiki/Rotation_matrix
"""
radian_x = np.radians(angle_x)
radian_y = np.radians(angle_y)
radian_z = np.radians(angle_z)
rot_x = np.array([
[1, 0, 0],
[0, np.cos(radian_x), -np.sin(radian_x)],
[0, np.sin(radian_x), np.cos(radian_x)]
])
rot_y = np.array([
[np.cos(radian_y), 0, np.sin(radian_y)],
[0, 1, 0],
[-np.sin(radian_y), 0, np.cos(radian_y)]
])
rot_z = np.array([
[np.cos(radian_z), -np.sin(radian_z), 0],
[np.sin(radian_z), np.cos(radian_z), 0],
[0, 0, 1]
])
return np.matmul(a=np.matmul(a=rot_x, b=rot_y), b=rot_z)
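# Example: a 90-degree rotation about z maps the x axis onto the y axis
# (column-vector convention, up to floating point error):
#   np.dot(get_rotation(angle_z=90), [1, 0, 0]) # ~[0, 1, 0]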
def gen_pymol_txm(rot, tran):
"""Converts BioPython-style rotation and translation into pymol's
transformation matrix string.
Args:
- rot - Bio.PDB.Superimposer().rotran[0]
- tran - Bio.PDB.Superimposer().rotran[1]
Returns:
- _ - string of pymol's transformation matrix.
"""
rot_tp = np.transpose(rot)
rot_tp_tran = np.append(rot_tp, np.transpose([tran]), axis=1)
pymol_rot_mat = np.append(rot_tp_tran, [[0, 0, 0, 1]], axis=0)
return '[' + ', '.join(map(str, pymol_rot_mat.ravel())) + ']'
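# Example (sketch; fixed_atoms/moving_atoms are assumed atom lists):
#   si = Bio.PDB.Superimposer()
#   si.set_atoms(fixed_atoms, moving_atoms)
#   matrix_str = gen_pymol_txm(*si.rotran)
#   # then paste matrix_str into PyMOL, e.g.: transform_object moving, <matrix_str>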
def int_ceil(float_num):
"""Ceil a float then turn it into an int."""
return int(np.ceil(float_num))
def int_floor(float_num):
"""Floor a float then turn it into an int."""
return int(np.floor(float_num))
def upsample(spec, pts):
"""Upsamples points to be the same number of points in specification. This
is code translated from Elfin core's C++ code.
"""
n_spec_points = len(spec)
more_points, fewer_points = (np.copy(spec), np.copy(pts))
# Compute longer shape total length
mp_total_length = 0.0
for i in range(1, n_spec_points):
mp_total_length += np.linalg.norm(more_points[i] - more_points[i - 1])
if mp_total_length == INF:
raise ValueError('Something fishy... mp_total_length is inf!')
fp_total_length = 0.0
for i in range(1, len(fewer_points)):
fp_total_length += np.linalg.norm(fewer_points[i] - fewer_points[i - 1])
if fp_total_length == INF:
raise ValueError('Something fishy... fp_total_length is inf!')
# Upsample fewer_points
upsampled = np.zeros([0, 3])
# First and last points are the same
upsampled = np.append(upsampled, [fewer_points[0]], axis=0)
mp_proportion = 0.0
fp_proportion = 0.0
mpi = 1
for i in range(1, len(fewer_points)):
base_fp_point = fewer_points[i - 1]
next_fp_point = fewer_points[i]
basefp_proportion = fp_proportion
fp_segment = np.linalg.norm(next_fp_point - base_fp_point) / fp_total_length
vec = next_fp_point - base_fp_point
fp_proportion += fp_segment
while mp_proportion <= fp_proportion and mpi < n_spec_points:
mp_segment = \
np.linalg.norm(more_points[mpi] - more_points[mpi - 1]) \
/ mp_total_length
if (mp_proportion + mp_segment) > fp_proportion:
break
mp_proportion += mp_segment
scale = (mp_proportion - basefp_proportion) / fp_segment
upsampled = np.append(upsampled, [base_fp_point + (vec * scale)], axis=0)
mpi += 1
# Sometimes the last node is automatically added
if len(upsampled) < n_spec_points:
upsampled = np.append(upsampled, [fewer_points[-1]], axis=0)
return upsampled
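# Example: resample a 2-point segment against a 3-point specification so
# both curves carry the same number of samples:
#   spec = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
#   pts = np.array([[0., 0., 0.], [2., 0., 0.]])
#   upsample(spec, pts) # -> [[0,0,0], [1,0,0], [2,0,0]]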
def float_approximates(float_a, float_b, error=1e-6):
"""Returns whether float a is approximately b within error tolerance"""
return abs(float_a-float_b) < error
def check_collision(**kwargs):
"""Tests whether a to-be-added node is too close to any node in partially or
completely formed shape.
Args:
- xDB - a dict containing the xDB data. Should have originated from
read_json().
- collision_measure - one of RADII_TYPES
- nodes - string list of module names
- new_node - string name the node to be tested
- shape - Nx(3x1 numpy array) list of node centre-of-masses
Returns:
- bool - whether or not the new node, when added to the shape, causes
collision.
"""
xdb = kwargs.pop('xdb')
collision_measure = kwargs.pop('collision_measure')
nodes = kwargs.pop('nodes')
new_node = kwargs.pop('new_node')
shape = kwargs.pop('shape')
new_com = xdb['double_data'][nodes[-1]][new_node]['com_b']
# previous node PAIR (not just single node!) is inherently non-colliding
for i in range(0, len(nodes) - 2):
com_dist = np.linalg.norm(shape[i] - new_com)
collision_dist = \
xdb['single_data'] \
[new_node]['radii'][collision_measure] + \
xdb['single_data'] \
[nodes[i]]['radii'][collision_measure]
if com_dist < collision_dist:
return True
return False
def com_dist_info(xdb):
"""Computes centre-of-mass distance information.
Args:
- xdb - a dict containing the xdb data. Should have originated from
read_json().
Returns:
- (_, _, _) - tuple containing average, min and max values for centre-of-mass
distances.
"""
all_tx = xdb['n_to_c_tx']
dists = [np.linalg.norm(tx['tran']) for tx in all_tx]
return np.average(dists), min(dists), max(dists)
def read_csv_points(csv_file):
"""A wrapper of read_csv() but returns as list of numpy array points."""
pts = []
with open(csv_file, 'r') as file:
pts = np.asarray(
[[float(n) for n in re.split(', *| *', l.strip())] \
for l in file.read().split('\n') if len(l) > 0])
return pts
def read_csv(read_path, delim=','):
"""Reads a generic CSV file.
Args:
- read_path - string path to read from.
- delim - delimiter to use for the CSV format.
Returns:
- rows - list of rows where each row is a string list of cell values.
"""
rows = []
with open(read_path) as csv_file:
sreader = csv.reader(csv_file, delimiter=delim)
for row in sreader:
rows.append([c.strip() for c in row])
return rows
def save_points_as_csv(**kwargs):
"""Saves a list of points into a CSV file.
Args:
- points - Nx(3x1 numpy array) list to be saved.
- save_path - string path to save to.
- delim - delimiter to use for the CSV format.
"""
points = kwargs.pop('points')
save_path = kwargs.pop('save_path')
delim = kwargs.pop('delim', ' ')
with open(save_path, 'w', newline='') as file:
writer = csv.writer(file, delimiter=delim)
for row in points:
writer.writerow(row)
def read_json(read_path):
"""Reads a JSON file adn returns a dict."""
with open(read_path, 'r') as file:
return json.load(file)
def make_dir(directory):
"""Creates directory if does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def pause_code(frame=None):
"""Pause execution and drop into interactive mode for debugging. This is
intended to be manually inserted into area of code where debugging is
needed.
Args:
- frame - specify frame in which the globals and locals are to be debugged.
"""
print('\n------------------pause_code()------------------')
if frame is None:
# Use current frame (one above the exception wrapper)
frame = inspect.currentframe().f_back
fi = inspect.getframeinfo(frame)
print('Where: {loc}:{line}'.format(loc=fi.filename, line=fi.lineno))
print('What: \n{code}'.format(code=fi.code_context[0]))
name_space = dict(frame.f_globals)
name_space.update(frame.f_locals)
code.interact(local=name_space)
def safe_exec(func, *args, **kwargs):
"""Execute func and drops into interactive mode for debugging if an exception
is raised.
Args:
- func - the function handle to be called.
- *args - args to be expanded for func.
"""
try:
func(*args, **kwargs)
except Exception as ex:
print('\n------------------safe_exec() caught exception------------------')
print(ex)
# Find last (failed) inner frame
_, _, traceback = sys.exc_info()
last_frame = \
traceback.tb_next.tb_next.tb_next \
if traceback and traceback.tb_next and traceback.tb_next.tb_next \
else traceback
if last_frame:
frame = last_frame.tb_frame
traceback_module.print_exc()
pause_code(frame)
else:
print('No frame to pause at...')
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ == '__main__':
main()
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/stat_xdb.py | .py | 2,667 | 103 | #!/usr/bin/env python3
import argparse, sys
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('Agg') # Must be selected before pyplot is imported to take effect.
import matplotlib.pyplot as plt
import networkx as nx
plt.ioff()
from utilities import *
def parse_args(args):
parser = argparse.ArgumentParser(description='Prints module radii stat from xdb')
return parser.parse_args(args)
def convert_to_hex(rgba_color) :
red = int(rgba_color[0]*255)
green = int(rgba_color[1]*255)
blue = int(rgba_color[2]*255)
return '#%02x%02x%02x' % (red, green, blue)
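# Example: convert_to_hex((1.0, 0.5, 0.0)) # -> '#ff7f00'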
def show_graph_with_labels(adjacency_matrix, labels):
labels_dict = {i: v for i, v in enumerate(labels)}
adjacency_matrix = np.asarray(adjacency_matrix)
G = nx.from_numpy_matrix(adjacency_matrix, create_using=nx.MultiDiGraph())
D = nx.degree(G)
D = [(D[node]+1) * 20 for node in G.nodes()]
fig_size = 15
plt.figure(figsize=(fig_size, fig_size))
pos = nx.spring_layout(G, k=1.9)
node_sizes = [1.3 * v for v in D]
nx.draw_networkx_nodes(G,
pos,
node_size=node_sizes,
node_color='pink')
nx.draw_networkx_labels(G,
pos,
labels_dict,
font_size=13,
font_color='black',
font_weight='bold')
nx.draw_networkx_edges(G,
pos,
edge_color='gray',
arrowstyle='->',
arrowsize=30,
width=1)
plt.axis('off')
plt.show(block=False)
plt.savefig('xdb_adj_mat.png', bbox_inches='tight')
def main(test_args=None):
args = parse_args(sys.argv[1:] if test_args is None else test_args)
xdb = read_json('resources/xdb.json')
# Print centre-of-mass stats
(avg_d, min_d, max_d) = com_dist_info(xdb)
print('Distances avg: {}, min: {}, max: {}'.format(avg_d, min_d, max_d))
# Print adjacency matrix
singles = xdb['modules']['singles']
hubs = xdb['modules']['hubs']
n_singles = len(singles)
n_hubs = len(hubs)
n_modules = n_singles + n_hubs
all_names = list(singles.keys()) + list(hubs.keys())
name_to_idx = {name: i for i, name in enumerate(all_names)}
adjmat = [[0] * n_modules for _ in range(0, n_modules)]
# print('-----------Adjacency Matrix-----------')
for tx in xdb['n_to_c_tx']:
mod_a, mod_b = tx['mod_a'], tx['mod_b']
id_a, id_b = name_to_idx[mod_a], name_to_idx[mod_b]
adjmat[id_a][id_b] = 1.0
# print(','.join((mod_a, mod_b)))
show_graph_with_labels(adjmat, all_names)
# print('Names:')
# print(','.join(all_names))
# for row in adjmat:
# print(','.join((str(i) for i in row)))
if __name__ =='__main__':
safe_exec(main) | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/dbgen.py | .py | 24,008 | 660 | #!/usr/bin/env python3
import glob
import numpy as np
import codecs
import json
import argparse
import shutil
from collections import defaultdict
from collections import OrderedDict
import Bio.PDB
from utilities import *
from pdb_utilities import *
nested_dict = lambda: defaultdict(nested_dict)
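# nested_dict permits arbitrary-depth assignment without intermediate keys:
#   d = nested_dict(); d['a']['b']['c'] = 1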
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Generates the xdb database from preprocessed single and double modules.')
parser.add_argument('--relaxed_pdbs_dir', default='./resources/pdb_prepped/')
parser.add_argument('--metadata_dir', default='./resources/metadata/')
parser.add_argument('--output', default='./resources/xdb.json')
parser.add_argument('--aligned_pdb_dir', default='./resources/pdb_aligned/')
return parser.parse_args(args)
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
XDBGenerator(
args.relaxed_pdbs_dir,
args.metadata_dir,
args.aligned_pdb_dir,
args.output
).run()
class XDBGenerator:
def __init__(
self,
relaxed_pdbs_dir,
metadata_dir,
aligned_pdb_dir,
out_file
):
self.relaxed_pdbs_dir = relaxed_pdbs_dir
module_types = ['doubles', 'singles', 'hubs']
shutil.move('metadata', 'resources/metadata')
make_dir(aligned_pdb_dir)
for mt in module_types:
make_dir(aligned_pdb_dir + '/{}/'.format(mt))
self.hub_info = read_json(metadata_dir + '/hub_info.json')
self.aligned_pdb_dir = aligned_pdb_dir
self.out_file = out_file
self.si = Bio.PDB.Superimposer()
self.modules = nested_dict()
self.n_to_c_tx = []
self.hub_tx = []
# Cache in memory because disk I/O is really heavy here
self.single_pdbs = defaultdict(dict)
self.double_pdbs = defaultdict(dict)
def find_tip(self, term, struct, chain_id):
term = term.lower()
assert(term in {'c', 'n'})
chain = get_chain(struct, chain_id=chain_id)
residues = chain.child_list
n = len(residues)
divider = 6 # The smaller the divider, the closer to terminus.
assert(n > 0)
if term == 'n':
start_idx, end_idx = 0, n//divider
else:
start_idx, end_idx = (divider-1)*n//divider, n
sum_coord = np.asarray([0., 0., 0.])
for r in residues[start_idx:end_idx]:
sum_coord += r['CA'].get_coord().astype('float64')
tip_vector = sum_coord/(end_idx - start_idx) # Mean CA position of the stretch.
return tip_vector.tolist()
def create_tx(self, mod_a, a_chain, mod_b, b_chain, rot, tran):
tx_entry = \
OrderedDict([
('mod_a', mod_a),
('mod_a_chain', a_chain),
('mod_b', mod_b),
('mod_b_chain', b_chain),
('rot', rot.tolist()),
('tran', np.asarray(tran).tolist())
])
return tx_entry
def process_hub(self, file_name):
"""Aligns a hub module to its A component (chain A), then computes the
transform for aligning itself to its other components.
"""
# Load structures
hub = read_pdb(file_name)
# Centre the hub
self.move_to_origin(hub)
hub_fusion_factor = 4
hub_name = os.path.basename(file_name).replace('.pdb', '')
hub_meta = self.hub_info.get(hub_name, None)
if hub_meta is None:
raise ValueError('Could not get hub metadata for hub {}\n'.format(hub_name))
# Create module entry first
comp_data = hub_meta['component_data']
del hub_meta['component_data']
hub_meta['chains'] = {
c.id: {
'single_name': comp_data[c.id]['single_name'],
'n': nested_dict(),
'n_tip': nested_dict(),
'c': nested_dict(),
'c_tip': nested_dict(),
'n_residues': len(c.child_list)
} for c in hub.get_chains()
}
hub_meta['radii'] = self.get_radii(hub)
self.modules['hubs'][hub_name] = hub_meta
# The current process does not allow hub to hub connections. Maybe this
# need to be changed?
for hub_chain_id in comp_data:
chain_data = comp_data[hub_chain_id]
comp_name = chain_data['single_name']
if chain_data['c_free']:
b_name_gen = (tx['mod_b'] for tx in self.n_to_c_tx if tx['mod_a'] == comp_name)
for single_b_name in b_name_gen:
# Compute the transformation required to move a single
# module B from its aligned position to the current hub's
# "finger tip".
#
# Here we do not use the second quadrant method, because during
# stitching none of the hubs' residues get changed. The stitching
# will take place at the end of the hub's component's terminal.
rc_hub_a = get_chain_residue_count(hub, hub_chain_id)
rc_dbl_a = get_pdb_residue_count(self.single_pdbs[comp_name])
fusion_count = int_ceil(float(rc_dbl_a) / hub_fusion_factor)
double = self.double_pdbs[comp_name][single_b_name]
# Compute transformation matrix.
# Find transform between component single and single b.
hub_single_chain_id = \
list(self.single_pdbs[comp_name].get_chains())[0].id
single_b_chain_id = \
list(self.single_pdbs[single_b_name].get_chains())[0].id
dbl_tx_id = self.modules['singles'][comp_name]['chains'] \
[hub_single_chain_id]['c'] \
[single_b_name][single_b_chain_id]
assert(dbl_tx_id is not None)
dbl_n_to_c = self.n_to_c_tx[dbl_tx_id]
dbl_tx = np.vstack(
(np.hstack((dbl_n_to_c['rot'], np.transpose([dbl_n_to_c['tran']]))),
[0,0,0,1])
)
# Find transform from hub to single A.
rot, tran = self.get_rot_trans(
fixed=hub,
fixed_chain_id=hub_chain_id,
moving=double,
fixed_resi_offset=rc_hub_a - fusion_count,
moving_resi_offset=rc_dbl_a - fusion_count,
match_count=fusion_count
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
comp_to_single_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
# 1. Shift to hub's component frame.
# 2. Shift to double B frame.
dbl_raised_tx = np.matmul(comp_to_single_tx, dbl_tx)
# Decompose transform.
rot = dbl_raised_tx[:3, :3]
tran = dbl_raised_tx[:3, 3]
tx = self.create_tx(
hub_name,
hub_chain_id,
single_b_name,
single_b_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['c'] \
[single_b_name][single_b_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['c_tip'] = \
self.find_tip('c', hub, hub_chain_id)
self.modules['singles'][single_b_name]['chains'] \
[single_b_chain_id]['n'] \
[hub_name][hub_chain_id] = tx_id
self.hub_tx.append(tx)
if chain_data['n_free']:
a_name_gen = (tx['mod_a'] for tx in self.n_to_c_tx if tx['mod_b'] == comp_name)
for single_a_name in a_name_gen:
# Same as c_free except comp acts as single b
rc_a = get_pdb_residue_count(self.single_pdbs[single_a_name])
rc_b = get_pdb_residue_count(self.single_pdbs[comp_name])
fusion_count = int_ceil(float(rc_b) / hub_fusion_factor)
double = self.double_pdbs[single_a_name][comp_name]
# Compute transformation matrix.
# Find transform from double component B to hub component.
rot, tran = self.get_rot_trans(
fixed=hub,
fixed_chain_id=hub_chain_id,
moving=double,
fixed_resi_offset=0, # start matching from the n-term of hub component, which is index 0
moving_resi_offset=rc_a, # start matching at the beginning of single b in the double
match_count=fusion_count
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
dbl_to_hub_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
# 1. Shift to hub frame - do nothing; just dbl_to_hub_tx.
# Decompose transform.
rot = dbl_to_hub_tx[:3, :3]
tran = dbl_to_hub_tx[:3, 3]
single_a_chain_id = \
list(self.single_pdbs[single_a_name].get_chains())[0].id
tx = self.create_tx(
single_a_name,
single_a_chain_id,
hub_name,
hub_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
self.modules['singles'][single_a_name]['chains'] \
[single_a_chain_id]['c'] \
[hub_name][hub_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['n'] \
[single_a_name][single_a_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['n_tip'] = \
self.find_tip('n', hub, hub_chain_id)
self.hub_tx.append(tx)
save_pdb(
struct=hub,
path=self.aligned_pdb_dir + '/hubs/' + hub_name + '.pdb'
)
def process_double(self, file_name):
"""Aligns a double module to its A component and then computes the transform
for aligning to its B component. Saves aligned structure to output folder.
"""
# Step 1: Load structures
double = read_pdb(file_name)
# Preprocessed pdbs have only 1 chain
assert(len(list(double.get_chains())) == 1)
double_name = file_name.split('/')[-1].replace('.pdb', '')
single_a_name, single_b_name = double_name.split('-')
single_a = self.single_pdbs[single_a_name]
single_b = self.single_pdbs[single_b_name]
rc_a = get_pdb_residue_count(single_a)
rc_b = get_pdb_residue_count(single_b)
rc_double = get_pdb_residue_count(double)
rc_a_half = int_floor(float(rc_a)/2)
rc_b_half = int_ceil(float(rc_b)/2)
# fusion_factor should be deprecated in favour of "core range".
dbl_fusion_factor = 8
fusion_count_a = int_ceil(float(rc_a) / dbl_fusion_factor)
fusion_count_b = int_ceil(float(rc_b) / dbl_fusion_factor)
# Step 2: Move double to align with the first single.
self.align(
moving=double,
fixed=single_a,
moving_resi_offset=rc_a_half - fusion_count_a,
fixed_resi_offset=rc_a_half - fusion_count_a,
match_count=fusion_count_a
)
# Step 3: Get COM of the single_b as seen in the double.
com_b = self.get_centre_of_mass(
single_b,
mother=double,
child_resi_offset=rc_b_half - fusion_count_b,
mother_resi_offset=rc_a + rc_b_half - fusion_count_b,
match_count=fusion_count_b
)
# Step 4: Get transformation of single B to part B inside double.
#
# Double is already aligned to first single so there is no need for
# the first transformation.
#
# Only align residues starting from the middle of single B because
# the middle suffers the least from interfacing displacements.
rot, tran = self.get_rot_trans(
moving=double,
fixed=single_b,
moving_resi_offset=rc_a + rc_b_half - fusion_count_b,
fixed_resi_offset=rc_b_half - fusion_count_b,
match_count=fusion_count_b
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
# Inverse result transform because we want the tx that takes the
# single B module to part B inside double.
tmp_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
inv_tx = np.linalg.inv(tmp_tx)
# Decompose transform.
rot = inv_tx[:3, :3]
tran = inv_tx[:3, 3]
# Step 5: Save the aligned molecules.
#
# Here the PDB format adds some slight floating point error. PDB is
# already phased out so and we should really consider using mmCIF for
# all modules.
save_pdb(
struct=double,
path=self.aligned_pdb_dir + '/doubles/' + double_name + '.pdb'
)
single_a_chain_id = list(single_a.get_chains())[0].id
single_b_chain_id = list(single_b.get_chains())[0].id
tx = self.create_tx(
single_a_name,
single_a_chain_id,
single_b_name,
single_b_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx)
self.modules['singles'][single_a_name]['chains'] \
[single_a_chain_id]['c'][single_b_name][single_b_chain_id] = tx_id
self.modules['singles'][single_b_name]['chains'] \
[single_b_chain_id]['n'][single_a_name][single_a_chain_id] = tx_id
self.n_to_c_tx.append(tx)
# Cache structure in memory
self.double_pdbs[single_a_name][single_b_name] = double
def process_single(self, file_name):
"""Centres a single module and saves to output folder."""
single_name = file_name.split('/')[-1].replace('.pdb', '')
single = read_pdb(file_name)
# Preprocessed pdbs have only one chain; raise informatively otherwise.
chain_list = list(single.get_chains())
if len(chain_list) != 1:
raise ValueError('Single PDB contains {} chains!\n'.format(len(chain_list)))
self.move_to_origin(single)
save_pdb(
struct=single,
path=self.aligned_pdb_dir + '/singles/' + single_name + '.pdb'
)
self.modules['singles'][single_name] = {
'chains': {
chain_list[0].id: {
'n': nested_dict(),
'c': nested_dict(),
'n_residues': len(chain_list[0].child_list)
}
},
'radii': self.get_radii(single)
}
# Cache structure in memory
self.single_pdbs[single_name] = single
def dump_xdb(self):
"""Writes alignment data to a json file."""
to_dump = \
OrderedDict([
('modules', self.modules),
('n_to_c_tx', self.n_to_c_tx)
])
json.dump(to_dump,
open(self.out_file, 'w'),
separators=(',', ':'),
ensure_ascii=False,
indent=4)
def get_centre_of_mass(
self,
child,
mother=None,
child_resi_offset=0,
mother_resi_offset=0,
match_count=-1
):
"""Computes centre-of-mass coordinate of a Bio.PDB.Structure.Structure.
Args:
- child - Bio.PDB.Structure.Structure for which the centre-of-mass should
be calculated.
- mother - Bio.PDB.Structure.Structure onto which child is to be first
aligned.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
Returns:
- com - 3x1 numpy array of the centre-of-mass.
"""
CAs = [r['CA'].get_coord().astype('float64') for r in child.get_residues()]
com = np.mean(CAs, axis=0)
if mother is not None:
# This is for finding COM of a single inside a double
_, tran = self.get_rot_trans(
moving=child,
fixed=mother,
moving_resi_offset=child_resi_offset,
fixed_resi_offset=mother_resi_offset,
match_count=match_count
)
com += tran
return com
def get_radii(self, pose):
"""Computes three different measures of the radius.
Args:
- pose - Bio.PDB.Structure.Structure
Returns:
- _ - an dict containing: average of all atoms distances, max
carbon alpha distance, and max heavy atom distance, each calculated
against the centre-of-mass.
"""
if not pose.at_origin:
raise ValueError('get_radii() must be called with centered modules.')
natoms = 0
rg_sum = 0
max_ca_dist = 0
n_heavy = 0
max_heavy_dist = 0
for a in pose.get_atoms():
dist = np.linalg.norm(a.get_coord().astype('float64'))
rg_sum += dist
if a.name == 'CA':
max_ca_dist = max(max_ca_dist, dist)
if a.element != 'H':
max_heavy_dist = max(max_heavy_dist, dist)
n_heavy += 1
natoms += 1
average_all = rg_sum / natoms
return {
'average_all': average_all,
'max_ca_dist': max_ca_dist,
'max_heavy_dist': max_heavy_dist
}
def move_to_origin(self, pdb):
"""Centres a Bio.PDB.Structure.Structure to the global origin."""
com = self.get_centre_of_mass(pdb)
# No rotation - just move to centre
pdb.transform([[1,0,0],[0,1,0],[0,0,1]], -com)
# Tag the pdb
pdb.at_origin = True
def align(
self,
**kwargs
):
"""Moves the moving Bio.PDB.Structure.Structure to the fixed
Bio.PDB.Structure.Structure.
"""
moving = kwargs.pop('moving')
fixed = kwargs.pop('fixed')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
rot, tran = self.get_rot_trans(
moving=moving,
fixed=fixed,
moving_resi_offset=moving_resi_offset,
fixed_resi_offset=fixed_resi_offset,
match_count=match_count
)
# BioPython's own transform() deals with the inversed rotation
# correctly.
moving.transform(rot, tran)
def get_rot_trans(
self,
**kwargs
):
"""Computes the rotation and transformation matrices using BioPython's
superimposer.
Args:
- moving - the Bio.PDB.Structure.Structure that is to move towards the
other (fixed).
- fixed - the Bio.PDB.Structure.Structure that the other (moving) is to
align to.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
----IMPORTANT NOTE----
The rotation from BioPython is the second dot operand instead of the
conventional first dot operand.
This means instead of the standard R*v + T, the actual transform is done
with v'*R + T.
Hence, the resultant rotation matrix might need transposing if not
passed back into BioPython.
----IMPORTANT NOTE----
Returns:
- (rot, tran) - a tuple containing the rotation and transformation
matrices.
"""
moving = kwargs.pop('moving')
moving_chain_id = kwargs.pop('moving_chain_id', 'A')
fixed = kwargs.pop('fixed')
fixed_chain_id = kwargs.pop('fixed_chain_id', 'A')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
moving_chain = get_chain(moving, chain_id=moving_chain_id)
moving_residues = moving_chain.child_list \
[moving_resi_offset:(moving_resi_offset+match_count)]
ma = [r['CA'] for r in moving_residues]
fixed_chain = get_chain(fixed, chain_id=fixed_chain_id)
fixed_residues = fixed_chain.child_list \
[fixed_resi_offset:(fixed_resi_offset+match_count)]
fa = [r['CA'] for r in fixed_residues]
self.si.set_atoms(fa, ma)
return self.si.rotran
def run(self):
"""Calls the processing functions for singles, doubles, and hubs in that
order. Dumps alignment data into json database.
"""
# Single modules
single_files = glob.glob(self.relaxed_pdbs_dir + '/singles/*.pdb')
n_singles = len(single_files)
for i in range(0, n_singles):
print('Centering single [{}/{}] {}' \
.format(i+1, n_singles, single_files[i]))
self.process_single(single_files[i])
# Double modules
double_files = glob.glob(self.relaxed_pdbs_dir + '/doubles/*.pdb')
nDoubles = len(double_files)
for i in range(0, nDoubles):
print('Aligning double [{}/{}] {}' \
.format(i+1, nDoubles, double_files[i]))
self.process_double(double_files[i])
# Hub modules
hub_files = glob.glob(self.relaxed_pdbs_dir + '/hubs/*.pdb')
nHubs = len(hub_files)
for i in range(0, nHubs):
print('Aligning hub [{}/{}] {}' \
.format(i+1, nHubs, hub_files[i]))
self.process_hub(hub_files[i])
self.n_to_c_tx += self.hub_tx
print('Total: {} singles, {} doubles, {} hubs'.format(n_singles, nDoubles, nHubs))
self.dump_xdb()
if __name__ =='__main__':
safe_exec(main)
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/elfin_node.py | .py | 1,926 | 53 | import numpy as np
import warnings
class ElfinNode(object):
"""
A single module instance and stores info about connectivity
"""
def __init__(
self,
**kwargs
):
self.id = kwargs.pop('id')
self.name = kwargs.pop('name')
self.trim = kwargs.pop('trim', {'n': False, 'c': False})
# Default is to cap the end that is not trimmed
self.cap = kwargs.pop('cap', {'n': not self.trim['n'], 'c': not self.trim['c']})
self.cterm_node_id = kwargs.pop('cterm_node_id', -1)
self.rot = kwargs.pop('rot', [[1,0,0],[0,1,0],[0,0,1]])
self.tran = kwargs.pop('tran', [0,0,0])
# Error checking
if self.id < 0:
raise ValueError('Bad ElfinNode id: {}'.format(self.id))
if len(self.trim) != 2 or \
'n' not in self.trim or 'c' not in self.trim:
raise ValueError('Bad ElfinNode trimming flags: {}'.format(self.trim))
if not self.trim['n'] and not self.trim['c']:
warnings.warn('ElfinNode (id={}, name={}) '
'trimming flags are FALSE for both n and c terms. '
'This only happens if there is a single module network or chain. '
'Proceed only if this is deliberate.'.format(self.id, self.name))
for term in {'n', 'c'}:
if self.trim[term] and self.cap[term]:
raise ValueError('Cannot cap a trimmed end({}): name={}, id={}'
.format(term, self.name, self.id))
def __repr__(self):
return 'ElfinNode: ID={}, Name={}'.format(self.id, self.name)
def transform(self, rot, tran):
self.rot = (np.dot(self.rot, np.transpose(rot))).tolist()
self.tran = (np.dot(self.tran, np.transpose(rot)) + tran).tolist()
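# Example (illustrative id/name; trims the c-term of a chain-starting node):
#   n = ElfinNode(id=0, name='D79', trim={'n': False, 'c': True})
#   n.transform([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [10, 0, 0])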
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/v1_design_convert.py | .py | 2,830 | 92 | #!/usr/bin/env python3
import argparse, sys
import copy
try:
from elfin_graph import ElfinGraph
from elfin_node import ElfinNode
from utilities import *
except ImportError as e:
from .elfin_graph import ElfinGraph
from .elfin_node import ElfinNode
from .utilities import *
def compute_old_graph_txm(xdb, graph):
nodes = graph.nodes
double_data = xdb['double_data']
for i in range(len(nodes)-1):
node_a = nodes[i]
node_b = nodes[i+1]
rel = double_data[node_a.name][node_b.name]
for j in range(i+1):
nodes[j].transform(rel['rot'], rel['tran'])
def parse_args(args):
parser = argparse.ArgumentParser(description='Converts old Elfin core intermediate output into new format');
parser.add_argument('input') # No dash means mandatory
parser.add_argument('--output')
parser.add_argument('--xdb_path', default='resources/xdb.json')
parser.add_argument('--multichain_test', action='store_true')
return parser.parse_args(args)
def v1_to_v2(input_json, xdb_path, multichain_test=False):# Elfin core output
# Make sure we're working with the old format
keys = input_json.keys()
if not 'nodes' in keys:
print('Error: input file is not a v1 elfin solution file.')
exit(1)
n_nodes = len(input_json['nodes'])
nodes = [
ElfinNode(
id=i,
name=el,
trim={'n': i != 0, 'c': i != n_nodes - 1},
cterm_node_id=((i+1) if i < n_nodes - 1 else -1)
) for (i, el) in enumerate(input_json['nodes'])
]
graph = ElfinGraph('c1', nodes) # c1 for chain number 1
graphs = [graph]
assert(len(graphs) == 1)
xdb = read_json(xdb_path)
for g in graphs:
compute_old_graph_txm(xdb, g)
if multichain_test:
graphs.append(copy.deepcopy(graph))
# Note: flipping z direction can cause problems in PyMol
# visualisation (can't view as cartoon)
graphs[0].transform([[-1,0,0],[0,-1,0],[0,0,1]],[100,100,0])
graphs[1].transform([[1,0,0],[0,1,0],[0,0,1]],[-100,-100,0])
graphs[1].name = 'c2'
return graphs
def main(test_args=None):
print('Deprecated. For code reference only.')
exit()
args = parse_args(sys.argv[1:] if test_args is None else test_args)
graphs = v1_to_v2(
read_json(args.input),
args.xdb_path,
args.multichain_test)
output_file = args.output
if output_file == None:
output_file = args.input.replace('.json', '.v2.json')
with open(output_file, 'w') as ofp:
json.dump(graphs, ofp, default=lambda o: o.__dict__)
print('Saved to: ' + output_file)
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/pdb_utilities.py | .py | 3,134 | 101 | import Bio.PDB
DIRTY_ATOMS = {'1H', '2H', '3H', 'OXT'}
BACKBONE_NAMES = {'N', 'CA', 'C', 'O', 'H', 'HA'}
def get_pdb_residue_count(pdb):
"""Returns the residue count of a Bio.PDB.Structure.Structure."""
return sum([len(c.child_list) for c in pdb.child_list[0].child_list])
def get_chain_residue_count(struct, chain_id):
"""Returns the residue count of a Bio.PDB.Structure.Structure."""
return len(get_chain(struct, chain_id).child_list)
def copy_residues(pdb, chain_ids=None):
return [r.copy() for r in get_residues(pdb, chain_ids)]
def get_residues(pdb, chain_ids=None):
"""Returns returns residues copied from a PDB.
Args:
- pdb - Bio.PDB.Structure.Structure.
- chain_ids - strip residues from these specific chain_ids only.
Returns:
- residues - a list of Bio.PDB.Residue.Residue.
"""
residues = []
for model in pdb:
for chain in model:
if chain_ids == None or chain.id in chain_ids:
residues.extend(chain.child_list)
return residues
def get_chain(struct, chain_id='A'):
"""Returns a specific chain from a Bio.PDB.Structure.Structure."""
return struct.child_list[0].child_dict[chain_id]
def get_chains(struct):
"""Returns all chains of a Bio.PDB.Structure.Structure."""
return struct.child_list[0].child_list
def read_pdb(
read_path,
pdb_name=None
):
"""Reads a PDB file and returns a BioPython structure.
Args:
- read_path - PDB string file path to read from.
- pdb_name - a string to set as the name of the Bio.PDB.Structure.Structure.
Returns:
- structure - Bio.PDB.Structure.Structure.
"""
if pdb_name == None:
pdb_name = read_path.split('/')[-1].replace('.', '_')
parser = Bio.PDB.PDBParser(PERMISSIVE=False)
structure = parser.get_structure(pdb_name, read_path)
return structure
def save_cif(**kwargs):
"""Saves a Bio.PDB.Structure.Structure as a CIF file. Does not automatically
append .cif extension.
Args:
- struct - Bio.PDB.Structure.Structure to be saved.
- path - CIF string file path.
"""
struct = kwargs.pop('struct')
path = kwargs.pop('path')
with open(path, 'w') as file:
io = Bio.PDB.mmcifio.MMCIFIO()
io.set_structure(struct)
io.save(file)
# Temporary fix for CIF files not getting parsed properly by Rosetta: add
# a dummy section at the end. ("Note that the final table in the cif file
# may not be recognized - adding a dummy entry (like `_citation.title
# ""`) to the end of the file may help.")
file.writelines('_citation.title "Elfin"')
def save_pdb(**kwargs):
"""Saves a Bio.PDB.Structure.Structure as a PDB file.
Args:
- struct - Bio.PDB.Structure.Structure to be saved.
- save_path - string file path.
"""
struct = kwargs.pop('struct')
path = kwargs.pop('path')
io = Bio.PDB.PDBIO()
io.set_structure(struct)
io.save(path)
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/preprocess.py | .py | 8,078 | 213 | #!/usr/bin/env python3
import argparse, sys
import subprocess, glob
from utilities import *
from pdb_utilities import *
def merge_chains(pdb):
"""Merge all chains in a PDB structure and re-number the residue IDs
accordingly
Args:
- pdb - Bio.PDB.Structure.Structure
Returns:
Bio.PDB.Structure.Structure - the modified PDB, used for chaining
calls
"""
new_chain = Bio.PDB.Chain.Chain('A')
rid = 1
for r in copy_residues(pdb):
r.id = (r.id[0], rid, r.id[2])
new_chain.add(r)
rid += 1
# Remove old chains from model
for model in pdb:
oc_ids = []
for chain in model:
oc_ids.append(chain.id)
for oc_id in oc_ids:
model.detach_child(oc_id)
model.add(new_chain)
return pdb # for chaining calls
def cleanse_atoms(pdb):
"""Delete dirty atoms from the PDB structure
Args:
- pdb - Bio.PDB.Structure.Structure
Returns:
- Bio.PDB.Structure.Structure - the modified PDB, used for chaining
calls
"""
for c in get_chains(pdb):
for r in c.child_list:
bad_atoms = [a for a in r if a.name in DIRTY_ATOMS]
for ba in bad_atoms:
r.detach_child(ba.name)
return pdb
def preprocess_double(double_file):
"""Merge chains, cleanse atoms if double_file is a simple double (no junction).
If double_file is a complex double (one with a junction), replace its
interfacing residues with those of a simple double, then merge chains and
cleanse atoms.
Loop-replacement is necessary because we have experimental data that confirm
interface atom positions in simple doubles but not in compound doubles. If
this is not done, the relaxation of database PDBs will result in sever
bending and dislocation in compound doubles.
Args:
- double_file - string path of the input double PDB file
Returns:
- Bio.PDB.Structure.Structure - preprocessed double PDB
"""
double_name = os.path.basename(double_file).replace('.pdb', '')
underscores = [double_name.find('_'), double_name.rfind('_')]
if underscores[0] == -1:
print('Input {} is a simple double and does not need loop replacement'.format(double_file))
return cleanse_atoms(merge_chains(read_pdb(double_file)))
dash_idx = double_name.rfind('-')
sdouble_first = dash_idx < underscores[0] and dash_idx < underscores[1]
double_name_halves = [double_name[:dash_idx], double_name[dash_idx+1:]]
sdouble_name = ''
sdouble_name += double_name_halves[0][double_name_halves[0].rfind('_')+1:] \
if double_name_halves[0].rfind('_') != -1 else double_name_halves[0]
sdouble_name += '-'
sdouble_name += double_name_halves[1][:double_name_halves[1].find('_')] \
if double_name_halves[1].find('_') != -1 else double_name_halves[1]
sdouble_file = double_file[:double_file.rfind('/')+1] + sdouble_name + '.pdb'
# Load PDBs
double = read_pdb(double_file)
double_chains = double.child_list[0].child_list
assert(len(double_chains) == 2)
sdouble = read_pdb(sdouble_file) #sdouble is the simple double
sdouble_chains = sdouble.child_list[0].child_list
assert(len(sdouble_chains) == 2)
# Get residue counts
sdouble_r_count = get_pdb_residue_count(sdouble)
# Compute interface residue range
sdouble_start_idx = int(np.ceil(sdouble_r_count*0.375))+1 # 0.375 is 75% of first single
sdouble_end_idx = int(np.floor(sdouble_r_count*0.625))-1 # 0.625 is 25% of second single
sdouble_end_offset = sdouble_end_idx - len(sdouble_chains[0].child_list)
# Find simple double middle residues
sdouble_chain_lens = [len(c.child_list) for c in sdouble_chains]
sdouble_mid_res = sdouble_chains[0].child_list[sdouble_start_idx:] + \
sdouble_chains[1].child_list[:sdouble_end_offset]
sdouble_atoms = [a for r in sdouble_mid_res for a in r.child_list if a.name == 'CA']
# Find first half of the residues
double_chain_lens = [len(c.child_list) for c in double_chains]
double_start_offset = double_chain_lens[0]-(sdouble_chain_lens[0]-sdouble_start_idx)
double_mid_res = \
(double_chains[0].child_list[sdouble_start_idx:] if sdouble_first else \
double_chains[0].child_list[double_start_offset:]) + \
double_chains[1].child_list[:sdouble_end_offset]
double_atoms = [a for r in double_mid_res for a in r.child_list if a.name == 'CA']
# Superimpose double onto sdouble
si = Bio.PDB.Superimposer()
si.set_atoms(sdouble_atoms, double_atoms)
double.transform(*si.rotran)
# Merge chains and remove bad atoms
cleanse_atoms(merge_chains(double))
cleanse_atoms(merge_chains(sdouble))
# Replace double residues where it should be sdouble residues
double_residues = get_chain(double).child_list
sdouble_residues = get_residues(sdouble)
for rIdx in range(sdouble_start_idx, sdouble_end_idx):
offset_r_idx = rIdx + (0 if sdouble_first else double_chain_lens[0]-sdouble_chain_lens[0])
old_r_id = double_residues[offset_r_idx].id
double_residues[offset_r_idx] = sdouble_residues[rIdx].copy()
double_residues[offset_r_idx].id = old_r_id
return double
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='Preprocess all raw single and double PDBs');
parser.add_argument('--input_dir', default='./pdb_raw/')
parser.add_argument('--output_dir', default='./resources/pdb_prepped/')
parser.add_argument('--output_dir_cap', default='./resources/pdb_cappings/')
parser.add_argument('--dry_run', action='store_true')
return parser.parse_args(args)
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
if args.input_dir is None or args.output_dir is None or args.output_dir_cap is None:
print('input_dir, output_dir, and output_dir_cap must all be set.')
sys.exit(1)
if not args.dry_run:
make_dir(args.output_dir)
double_output_dir = args.output_dir + '/doubles/'
make_dir(double_output_dir)
single_output_dir = args.output_dir + '/singles/'
make_dir(single_output_dir)
hub_output_dir = args.output_dir + '/hubs/'
make_dir(hub_output_dir)
cap_output_dir = args.output_dir_cap
make_dir(cap_output_dir)
# Doubles
doubleFiles = glob.glob(args.input_dir + '/doubles/*.pdb')
N = len(doubleFiles)
for i in range(N):
double_file = doubleFiles[i]
print('Prepping double [{}/{}] {}'.format(i+1, N, double_file))
double = preprocess_double(double_file)
if not args.dry_run:
save_pdb(struct=double, path=double_output_dir + '/' + os.path.basename(double_file))
# Singles
singleFiles = glob.glob(args.input_dir + '/singles/*.pdb')
N = len(singleFiles)
for i in range(N):
single_file = singleFiles[i]
print('Prepping single [{}/{}] {}'.format(i+1, N, single_file))
# Singles need nothing other than cleansing
single = cleanse_atoms(read_pdb(single_file))
if not args.dry_run:
save_pdb(struct=single, path=single_output_dir + '/' + os.path.basename(single_file))
# Hubs
hubFiles = glob.glob(args.input_dir + '/hubs/*.pdb')
N = len(hubFiles)
for i in range(N):
hub_file = hubFiles[i]
print('Prepping hub [{}/{}] {}'.format(i+1, N, hub_file))
hub = cleanse_atoms(read_pdb(hub_file))
if not args.dry_run:
save_pdb(struct=hub, path=hub_output_dir + '/' + os.path.basename(hub_file))
# Caps
capFiles = glob.glob(args.input_dir + '/cappings/*.pdb')
N = len(capFiles)
for i in range(N):
cap_file = capFiles[i]
print('Prepping cap [{}/{}] {}'.format(i+1, N, cap_file))
cap = cleanse_atoms(read_pdb(cap_file))
if not args.dry_run:
save_pdb(struct=cap, path=cap_output_dir + os.path.basename(cap_file))
if __name__ == '__main__':
main()
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/__init__.py | .py | 0 | 0 | null | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/extract_solution.py | .py | 4,164 | 115 | #!/usr/bin/env python3
# This script takes a solver output JSON and exports a specific solution
import sys
import argparse
try:
from utilities import *
except ImportError as e:
from .utilities import *
def parse_args(args):
desc = 'Exports a specific solution from a solver output JSON.'
parser = argparse.ArgumentParser(description=desc);
parser.add_argument('input_path')
parser.add_argument('output_path')
return parser.parse_args(args)
def main(test_args=None):
args = parse_args(sys.argv[1:] if test_args is None else test_args)
se = SolutionExtractor(args.input_path, args.output_path)
se.extract()
class SolutionExtractor:
def __init__(self, input_path, output_path):
self.input_path = input_path
self.output_path = output_path
def extract(self):
input_json = read_json(self.input_path)
exporter = input_json['exporter']
if(exporter != 'elfin-solver'):
raise ValueError(f'Input file is apparently not generated by elfin-solver. Exporter: \"{exporter}\"');
self.show_solution_structure(input_json)
output = self.construct_output(input_json)
with open(self.output_path, 'w') as output_file:
json.dump(output,
output_file,
separators=(',', ':'),
ensure_ascii=False,
indent=4)
def construct_output(self, input_json):
print()
print('***************************************')
print('** Specify Extraction **')
print('***************************************')
pg_networks = input_json['pg_networks']
pg_networks_output = {}
for pgn_name in pg_networks:
print(f'Path Guide network \"{pgn_name}\"')
pgn = pg_networks[pgn_name]
pgn_output = {}
for dec_name in pgn:
print(f'|\n|-- Decimated Area \"{dec_name}\"')
dec = pgn[dec_name]
max_sol_idx = len(dec)
sol_idx = 0
input_ok = False
while (not input_ok):
sol_idx_input = input(f'Choose between solution indexes #1 ~ #{max_sol_idx} (default is #1): ')
try:
# Default case.
if len(sol_idx_input) == 0:
sol_idx = 1
input_ok = True
break
sol_idx = int(sol_idx_input)
if 1 <= sol_idx <= max_sol_idx:
input_ok = True
else:
raise ValueError('Out of range')
except ValueError:
print('Invalid input. Try again!')
sol = dec[sol_idx - 1]
print(f'Extracting Solution #{sol_idx} {self.rep_solution(sol)}')
pgn_output[dec_name] = sol
pg_networks_output[pgn_name] = pgn_output
output = {'pg_networks': pg_networks_output}
return output
def show_solution_structure(self, input_json):
print()
print('***************************************')
print('** Solution file structure **')
print('***************************************')
pg_networks = input_json['pg_networks']
for pgn_name in pg_networks:
print(f'Path Guide network \"{pgn_name}\"')
pgn = pg_networks[pgn_name]
for dec_name in pgn:
print(f'|\n|-- Decimated Area \"{dec_name}\"')
dec = pgn[dec_name]
sol_idx = 1
for sol in dec:
print(f'|-- -- Solution #{sol_idx} {self.rep_solution(sol)}')
sol_idx += 1
print('')
def rep_solution(self, sol):
return f'(checksum {hex(sol["checksum"])[2:].rjust(10)}, score {sol["score"]:.2f})'
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/kabsch.py | .py | 4,082 | 161 | #!/usr/bin/env python3
"""
Calculate RMSD between two XYZ files
by: Jimmy Charnley Kromann <jimmy@charnley.dk> and Lars Andersen Bratholm <larsbratholm@gmail.com>
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE
"""
import numpy as np
import re
def kabsch_rmsd(P, Q):
"""Rotate matrix P unto Q and calculate the RMSD."""
P = kabsch_rotate(P, Q)
return rmsd(P, Q)
def kabsch_rotate(P, Q):
"""Rotate matrix P unto matrix Q using Kabsch algorithm."""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def run_kabsch(P, Q):
"""The optimal rotation matrix U is calculated and then used to rotate matrix
P unto matrix Q so the minimum root-mean-square deviation (RMSD) can be
calculated.
Using the Kabsch algorithm with two sets of paired point P and Q,
centered around the center-of-mass.
Each vector set is represented as an NxD matrix, where D is the
the dimension of the space.
The algorithm works in three steps:
- a translation of P and Q
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters:
P -- (N, number of points)x(D, dimension) matrix
Q -- (N, number of points)x(D, dimension) matrix
Returns:
U -- Rotation matrix
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
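# Example: recover a known rotation between two centred point sets.
#   P = np.random.rand(10, 3); P -= P.mean(axis=0)
#   Rz = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
#   Q = np.dot(P, Rz)
#   U = run_kabsch(P, Q) # np.dot(P, U) ~ Q, so U ~ Rz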
def quaternion_rmsd(P, Q):
"""Rotate matrix P unto Q and calculate the RMSD.
Based on doi:10.1016/1049-9660(91)90036-O
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P,rot)
return rmsd(P, Q)
def quaternion_transform(r):
"""Get optimal rotation.
Note: translation will be zero when the centroids of each molecule are the
same
"""
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3,:3]
return rot
def makeW(r1,r2,r3,r4=0):
"""Matrix involved in quaternion rotation."""
W = np.asarray([
[r4, r3, -r2, r1],
[-r3, r4, r1, r2],
[r2, -r1, r4, r3],
[-r1, -r2, -r3, r4] ])
return W
def makeQ(r1,r2,r3,r4=0):
"""Matrix involved in quaternion rotation."""
Q = np.asarray([
[r4, -r3, r2, r1],
[r3, r4, -r1, r2],
[-r2, r1, r4, r3],
[-r1, -r2, -r3, r4] ])
return Q
def quaternion_rotate(X, Y):
"""Calculate rotation."""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T,W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
C1 = -np.sum(Qt_dot_W,axis=0)
C2 = 0.5*N
C3 = np.sum(W_minus_Q,axis=0)
A = np.dot(C3.T,C3)*C2-C1
eigen = np.linalg.eigh(A)
r = eigen[1][:,eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
def centroid(X):
"""Calculate the centroid from a vectorset X."""
C = sum(X)/len(X)
return C
def rmsd(V, W):
"""Calculate Root-mean-square deviation from two sets of vectors V and
W.
"""
D = len(V[0])
N = len(V)
rmsd = 0.0
for v, w in zip(V, W):
rmsd += sum([(v[i]-w[i])**2.0 for i in range(D)])
return np.sqrt(rmsd/N)
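# Worked example: V = [[0, 0, 0]] and W = [[3, 4, 0]] give a single squared
# distance of 3^2 + 4^2 = 25, so rmsd(V, W) = sqrt(25 / 1) = 5.0.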
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/elfin_graph.py | .py | 622 | 24 | class ElfinGraph(object):
"""
A network of nodes that are connected either by doubles or through hubs.
Might be multi-chain.
"""
def __init__(self, name='', nodes=None):
# 'nodes=None' avoids the shared mutable default argument pitfall.
self.name = name
self.nodes = nodes if nodes is not None else []
def __repr__(self):
return 'ElfinGraph: {{\n{}\n}}\n'.format(
'\n'.join((repr(n) for n in self.nodes)))
def transform(self, rot, tran):
for n in self.nodes:
n.transform(rot, tran)
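# Usage sketch (assumption: the node objects expose a transform(rot, tran)
# method, which is all ElfinGraph.transform relies on; names are made up):
#
#   graph = ElfinGraph(name='demo', nodes=[node_a, node_b])
#   graph.transform(rot=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], tran=[0.0, 0.0, 10.0])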
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/hubinfo_convert.py | .py | 1,446 | 42 | #!/usr/bin/env python3
import argparse, sys
import json
from collections import OrderedDict
from utilities import *
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Converts hub info metadata from csv to json.')
parser.add_argument('input') # Absence of dash denotes mandatory argument
return parser.parse_args(args)
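# Expected CSV layout (an assumption inferred from the parsing below): rows
# come in pairs - a hub row [pdb_name, oligomer_type, symmetric_flag] followed
# by a chains row made of 4-column groups
# [chain_id, single_name, 'N_free' or other, 'C_free' or other].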
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
csv_data = read_csv(args.input)
new_data = OrderedDict({})
for i in range(int(len(csv_data)/2)): # rows come in pairs: hub row, then its chains row
row = csv_data[int(2*i)]
chains = csv_data[int(2*i+1)]
component_data = OrderedDict({})
for j in range(int(len(chains)/4)): # chains row holds 4-column groups
component_data[chains[int(4*j)]] = \
{ 'single_name': chains[int(4*j+1)],
'c_free': chains[int(4*j+3)] == 'C_free',
'n_free': chains[int(4*j+2)] == 'N_free' }
new_data[row[0].replace('.pdb', '')] = \
OrderedDict({ 'oligomer_type': row[1],
'symmetric': row[2] == 'symmetric',
'component_data': component_data})
json.dump(new_data,
open(args.input.replace('.csv', '') + '.json', 'w'),
separators=(',',':'),
ensure_ascii=False,
indent=4)
if __name__ == '__main__':
safe_exec(main)
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/job_dispatcher.py | .py | 1,014 | 36 | #!/usr/bin/env python3
import argparse, sys
import multiprocessing
import subprocess
from utilities import *
def dispatch(*cmd_and_arg):
"""Dispatches a process to run cmd with given arguments."""
subprocess.check_call(*cmd_and_arg)
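# Example (assumed cmd_list layout): a line such as "python3 run.py --seed 1"
# is split by read_csv() into ['python3', 'run.py', '--seed', '1'], and
# dispatch() passes that list straight to subprocess.check_call().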
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Run single threaded jobs in separate processes')
parser.add_argument('cmd_list')
parser.add_argument('-worker_count', default='cpu_count')
return parser.parse_args(args)
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
if args.worker_count == 'cpu_count':
pool = multiprocessing.Pool(multiprocessing.cpu_count())
else:
pool = multiprocessing.Pool(int(args.worker_count))
pool.map(dispatch, read_csv(args.cmd_list, delim=' '))
if __name__ == '__main__':
safe_exec(main) | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/stitch.py | .py | 26,251 | 747 | #!/usr/bin/env python3
#
# This script creates the CIF atom model from a design solution exported from
# elfin-ui in JSON format.
#
from collections import deque
from collections import namedtuple
import sys
import argparse
import numpy as np
import Bio.PDB
import Bio.SubsMat.MatrixInfo
import Bio.PDB.StructureBuilder
try:
import utilities as utils
import pdb_utilities as pdb_utils
except ImportError:
from . import utilities as utils
from . import pdb_utilities as pdb_utils
def parse_args(args):
desc = ('Create CIF atom model from design solution JSON exported '
'by elfin-ui.')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file')
parser.add_argument('-o', '--out_file', default='')
parser.add_argument('-x', '--xdb', default='./resources/xdb.json')
parser.add_argument('-p', '--pdb_dir', default='./resources/pdb_aligned/')
parser.add_argument('-c', '--cappings_dir',
default='./resources/pdb_cappings')
parser.add_argument('-m', '--metadata_dir',
default='./resources/metadata/')
parser.add_argument('-s', '--show_fusion', action='store_true')
parser.add_argument('-d', '--disable_capping', action='store_true')
parser.add_argument('--skip_unused', action='store_true')
return parser.parse_args(args)
def main(test_args=None):
args = parse_args(sys.argv[1:] if test_args is None else test_args)
input_ext = args.input_file[args.input_file.rfind('.'):].lower()
if input_ext == '.json':
spec = utils.read_json(args.input_file)
xdb = utils.read_json(args.xdb)
struct = Stitcher(
spec,
xdb,
args.pdb_dir,
args.cappings_dir,
args.metadata_dir,
args.show_fusion,
args.disable_capping,
args.skip_unused
).run()
if args.out_file == '':
args.out_file = args.input_file
args.out_file = '.'.join(args.out_file.split('.')[:-1] + ['cif'])
print('Saving to:', args.out_file)
pdb_utils.save_cif(struct=struct, path=args.out_file)
else:
print('Unknown input file type: \"{}\"'.format(input_ext))
exit()
def validate_spec(spec):
if 'networks' not in spec:
return 'No networks object in spec.'
else:
if not spec['networks']:
return 'Spec file has no module networks.'
if 'pg_networks' in spec:
n_pgn = len(spec['pg_networks'])
if n_pgn > 0:
return ('Spec file has {} path guide networks. '
'It should have zero.').format(n_pgn)
def get_node(network, json_name):
node = network[json_name]
utils.check_mod_type(node['module_type'])
return node
TermIdentifierBase = namedtuple(
'TermIdentifierBase', ['ui_name', 'chain_id', 'term'])
class TermIdentifier(TermIdentifierBase):
"""Small class to hold terminus identifier data"""
def __new__(cls, *args, **kwargs):
self = super(TermIdentifier, cls).__new__(cls, *args, **kwargs)
utils.check_term_type(self.term)
return self
def __repr__(self):
return ':'.join((str(getattr(self, f)) for f in self._fields))
def same_chain_as(self, other):
return self.ui_name == other.ui_name and \
self.chain_id == other.chain_id
ChainIdentifierBase = namedtuple('ChainIdentifierBase', ['src', 'dst'])
class ChainIdentifier(ChainIdentifierBase):
"""
Small class to hold source and destination TermIdentifiers
for a chain
"""
def __new__(cls, *args, **kwargs):
self = super(ChainIdentifier, cls).__new__(cls, *args, **kwargs)
assert self.src.term == 'n'
assert self.dst.term == 'c'
return self
def __repr__(self):
return '{}->{}'.format(self.src, self.dst)
# Returns a list of all leaf TermIdentifiers.
#
# A leaf is a terminus that is either unoccupied or on a hub node.
def find_leaves(network, xdb):
try:
res = []
for ui_name in network:
node = get_node(network, ui_name)
mod_type = node['module_type']
mod_name = node['module_name']
chains = xdb['modules'][mod_type + 's'][mod_name]['chains']
cl = node['c_linkage']
nl = node['n_linkage']
if mod_type == 'hub':
for c in chains:
if chains[c]['n']:
res.append(TermIdentifier(ui_name, c, 'c'))
if chains[c]['c']:
res.append(TermIdentifier(ui_name, c, 'n'))
else: # Guaranteed to be 'single' thanks to get_node()
if not nl:
res.append(TermIdentifier(
ui_name, cl[0]['source_chain_id'], 'n'))
if not cl:
res.append(TermIdentifier(
ui_name, nl[0]['source_chain_id'], 'c'))
return res
except KeyError as ke:
print('KeyError:', ke)
print('Probably bad input format.')
exit()
# Walks a chain starting from the src TermIdentifier, according to the
# network JSON object, yielding each TermIdentifier and next_linkage on the
# fly.
def walk_chain(network, src):
ui_name, chain_id, term = src
while True:
node = get_node(network, ui_name)
# Advance until either a hub or a single with dangling terminus is
# encountered.
next_linkages = [
l for l in node[utils.opposite_term(term) + '_linkage']
if l['source_chain_id'] == chain_id
]
assert len(next_linkages) <= 1, \
'Expected next_linkages size <= 1 (since ' \
'each node has max 2 linkages, one N and one C).'
term_iden = TermIdentifier(ui_name, chain_id, term)
next_linkage = next_linkages[0] if next_linkages else None
yield term_iden, next_linkage
if not next_linkage:
break
ui_name, chain_id = \
next_linkage['target_mod'], \
next_linkage['target_chain_id']
# Walks the network and returns a generator of ChainIdentifiers.
#
# This method guarantees that src->dst is in the direction of N->C.
def decompose_network(network, xdb, skip_unused=False):
src_q = deque()
visited = set()
# Find entry node to begin walking the network with.
leaves = find_leaves(network, xdb)
assert leaves, 'No leaf nodes for network.'
src_q.extend(leaves)
while src_q:
src = src_q.popleft()
if src in visited:
# This could happen when termini identifiers on hubs are added
# before the termini on the other end of those chains are popped
# out of the queue.
continue
visited.add(src)
mod_type = None
chain_walker = walk_chain(network, src)
for term_iden, next_linkage in chain_walker:
ui_name, chain_id, term = term_iden
node = get_node(network, ui_name)
mod_type = node['module_type']
if not next_linkage:
dst = TermIdentifier(
ui_name, chain_id, utils.opposite_term(term))
if dst not in visited:
visited.add(dst)
srcdst = (src, dst) if term == 'n' else (dst, src)
chain_iden = ChainIdentifier(*srcdst)
yield chain_iden
if mod_type == 'hub':
# Add unvisited components as new chain sources.
hub = xdb['modules']['hubs'][node['module_name']]
for hub_chain_id in hub['chains']:
hub_chain = hub['chains'][hub_chain_id]
for term in utils.TERM_TYPES:
if hub_chain[term]: # If not dormant.
iden = (ui_name, hub_chain_id, term)
if iden not in visited:
src_q.append(iden)
break
if mod_type == 'hub':
# This is a "bypass" hub, i.e. the current hub component has
# interfaceable N and C terms, and the current chain goes
# through it without ending here.
#
# In this case, check for unused components that might not
# need to be placed since they aren't leaves nor connect to
# any leaf nodes.
hub = xdb['modules']['hubs'][node['module_name']]
for hub_chain_id in hub['chains']:
if hub_chain_id == chain_id:
continue
c_links = len([l for l in node['c_linkage']
if l['source_chain_id'] == hub_chain_id])
n_links = len([l for l in node['n_linkage']
if l['source_chain_id'] == hub_chain_id])
if c_links == n_links == 0:
if skip_unused:
print('Skipping unused hub component:',
ui_name, hub_chain_id)
else:
srcdst = (
TermIdentifier(ui_name, hub_chain_id, 'n'),
TermIdentifier(ui_name, hub_chain_id, 'c')
)
yield ChainIdentifier(*srcdst)
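# Consumption sketch - this mirrors how Stitcher.deposit_chains() below uses
# the generator; src is guaranteed to be the N-side terminus:
#
#   for chain_iden in decompose_network(network, xdb):
#       process(chain_iden.src, chain_iden.dst)  # process() is hypothetical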
ModInfo = namedtuple('ModInfo', ['mod_type', 'mod_name', 'res', 'res_n'])
def transform_residues(res, rot, tran):
for r in res:
for a in r:
# Do the transform manually because BioPython has a non-standard
# multiplication order.
a.coord = rot.dot(a.coord) + tran
# Blend residue lists M = (1-w)M + wF, where M is an atom coordinate in
# moving_res, F is an atom coordinate in fixed_res, and w is the corresponding
# weight in weights.
#
# Also removes dirty atoms. If residues are not the same (name), only backbone
# atoms are blended.
def blend_residues(moving_res, fixed_res, weights):
# temporarily disable blending because it's causing horrible
# residue distortions
return
assert len(moving_res) == len(fixed_res)
assert len(moving_res) == len(weights)
for m, f, w in zip(moving_res, fixed_res, weights):
# Remove dirty atoms. They seem to crop up in the process of
# optimizing PDBs even if preprocess.py already removed them once.
#
# Also remove atoms not in fixed residue - this is only known to
# happen to CYS (HG) and HIS (HE1/HE2).
if m.resname == f.resname:
# Complain about absent atoms
for a in m:
if a.name not in pdb_utils.DIRTY_ATOMS and a.name not in f:
print(a.name, 'not in', f.resname)
to_remove = [a for a in m if a.name in
pdb_utils.DIRTY_ATOMS or a.name not in f]
for da in to_remove:
m.detach_child(da.name)
# Compute new position based on combination of two positions.
def compute_coord(a, b): return (1-w)*a.coord + w*b.coord
for ma in m:
if m.resname == f.resname:
# Identical residues should have the same atom positions
assert ma.name in f
ma.coord = compute_coord(ma, f[ma.name])
else:
# Only modify backbone atoms.
if ma.name in pdb_utils.BACKBONE_NAMES and \
ma.name in f:
ma.coord = compute_coord(ma, f[ma.name])
class Stitcher:
def __init__(
self,
spec,
xdb,
pdb_dir,
cappings_dir,
metadata_dir,
show_fusion=False,
disable_capping=False,
skip_unused=False,
):
spec_complaint = validate_spec(spec)
if spec_complaint:
print('Error:', spec_complaint)
exit()
self.spec = spec
self.xdb = xdb
self.pdb_dir = pdb_dir
self.cr_dir = cappings_dir
self.show_fusion = show_fusion
self.disable_capping = disable_capping
self.skip_unused = skip_unused
self.si = Bio.PDB.Superimposer()
self.chain_id = 0
# Parse and convert capping repeat indices into a dictionary
self.capping_repeat_idx = {}
meta_csv = utils.read_csv(
metadata_dir + '/repeat_indicies.csv', delim=' ')
for row in meta_csv:
mod_name = row[0].split('.')[0].replace('DHR', 'D')
self.capping_repeat_idx[mod_name] = \
[int(idx) for idx in row[1:]]
def deposit_chain(self, network, chain_iden):
# n -src-> c ... n -dst-> c
print('Deposit chain:', chain_iden)
src, dst = chain_iden
atom_chain = self.new_chain()
# Build context to pass to subroutines.
def context(): return 0
context.atom_chain = atom_chain
context.network = network
context.last_node = None
chain_walker = walk_chain(network, src)
for term_iden, next_linkage in chain_walker:
context.term_iden = term_iden
context.next_linkage = next_linkage
print('Deposit {}->{}'.format(repr(term_iden),
next_linkage['target_mod']
if next_linkage else None))
context.node = get_node(network, term_iden.ui_name)
context.mod_info = self.get_mod_info(
context.node, term_iden.chain_id)
context.pref_res = []
context.main_res = [r.copy() for r in context.mod_info.res]
context.suff_res = []
if context.last_node:
# Midway through the chain - always displace N term.
self.displace_terminus(context, 'n')
else:
# Start of chain on the N side - cap N term.
self.cap_terminus(context, 'n')
if next_linkage:
# There's a next node - always displace C term.
self.displace_terminus(context, 'c')
else:
# There's no next node - cap C term.
self.cap_terminus(context, 'c')
all_res = context.pref_res + context.main_res + context.suff_res
for r in all_res:
r.id = (r.id[0], self.next_residue_id(), r.id[2])
rot = np.transpose(np.asarray(context.node['rot']))
r.transform(rot, context.node['tran'])
atom_chain.add(r)
if self.show_fusion:
# curr_chain = Bio.PDB.Chain.Chain(chain_id)
# chains.append(curr_chain)
print('TODO: show_fusion')
context.last_node = context.node
context.last_term_iden = term_iden
print('')
self.model.add(atom_chain)
def cap_terminus(self, deposit_context, term):
utils.check_term_type(term)
if self.disable_capping:
print('Capping disabled')
return
# Unpack context.
mod_info = deposit_context.mod_info
residues = deposit_context.main_res
chain_id = deposit_context.term_iden.chain_id
if mod_info.mod_type == 'single':
cap_name = mod_info.mod_name.split('_')[0 if term == 'n' else -1]
elif mod_info.mod_type == 'hub':
# If we were to cap hubs, we need to first check whether N
# term is an open terminus in this hub.
hub = self.xdb['modules']['hubs'][mod_info.mod_name]
chain = hub['chains'][chain_id]
cap_name = chain['single_name']
if chain[term]:
# Continue to capping as usual.
pass
else:
# No need to cap a hub component term that is a
# closed interface.
return
print('Capping {}({})'.format(term, cap_name))
pdb_path = '{}/{}_{}.pdb'.format(self.cr_dir, cap_name,
'NI' if term == 'n' else 'IC')
cap_and_repeat = pdb_utils.read_pdb(pdb_path)
cap_res = self.get_capping(
prime_res=residues,
cap_res=pdb_utils.get_residues(cap_and_repeat),
cr_r_ids=self.capping_repeat_idx[cap_name],
term=term
)
if term == 'n':
deposit_context.pref_res = cap_res
else:
deposit_context.suff_res = cap_res
# Computes the capping residues. Displaces primary residues (thus modifies
# the prime_res parameter).
def get_capping(self, prime_res, cap_res, cr_r_ids, term):
utils.check_term_type(term)
# Find residue index at which the residue id[1] matches capping
# start index. Residue id often does not start from 1 and is never
# 0-based.
rid_range = tuple(cr_r_ids[:2]) if term == 'n' else tuple(cr_r_ids[2:])
for i, el in enumerate(cap_res):
if el.id[1] == rid_range[0]:
match_start = i
break
else:
raise ValueError('Could not find residue index {}'.format(
rid_range[0]))
match_len = rid_range[1] - rid_range[0] + 1 # Inclusive
match_end = match_start + match_len
# N: match left, C: match right
prime_align_res = prime_res[:match_len] \
if term == 'n' else \
prime_res[-match_len:]
cap_align_res = cap_res[match_start:match_end]
prim_atoms = [r['CA'] for r in prime_align_res]
cap_atoms = [r['CA'] for r in cap_align_res]
self.si.set_atoms(prim_atoms, cap_atoms)
rot, tran = self.si.rotran
result = []
cap_protrude_res = cap_res[:match_start] + cap_res[match_end:]
for r in cap_protrude_res:
rr = r.copy()
rr.transform(rot, tran)
result.append(rr)
# Also transform cap align res to the right frame.
for r in cap_align_res:
r.transform(rot, tran)
# Displace prime_res using linear weights, the same method as
# displace_terminus().
# Linear weights (0, 1] - default for 'c'.
disp_w = [i/match_len for i in range(1, match_len + 1)]
if term == 'n':
disp_w.reverse() # Want [1, 0) for N term.
blend_residues(prime_align_res, cap_align_res, disp_w)
return result
def displace_terminus(self, deposit_context, term):
utils.check_term_type(term)
if term == 'n':
assert deposit_context.last_node
# Node A is on the C end, so we get the N end node, and swap
# order.
a_node = deposit_context.last_node
a_chain_id = deposit_context.last_term_iden.chain_id
a_info = self.get_mod_info(a_node, a_chain_id)
b_node = deposit_context.node
b_chain_id = deposit_context.term_iden.chain_id
b_info = deposit_context.mod_info
elif term == 'c':
next_linkage = deposit_context.next_linkage
assert next_linkage
# Node A is on the N end, so we get the C end node.
a_node = deposit_context.node
a_chain_id = deposit_context.term_iden.chain_id
a_info = deposit_context.mod_info
b_ui_name, b_chain_id = next_linkage['target_mod'], \
next_linkage['target_chain_id']
b_node = get_node(deposit_context.network, b_ui_name)
b_info = self.get_mod_info(b_node, b_chain_id)
types = (a_info.mod_type, b_info.mod_type)
if types == ('single', 'single'):
a_single_name = a_info.mod_name
b_single_name = b_info.mod_name
elif types == ('hub', 'single'):
hub = self.xdb['modules']['hubs'][a_info.mod_name]
a_single_name = hub['chains'][a_chain_id]['single_name']
b_single_name = b_info.mod_name
elif types == ('single', 'hub'):
a_single_name = a_info.mod_name
hub = self.xdb['modules']['hubs'][b_info.mod_name]
b_single_name = hub['chains'][b_chain_id]['single_name']
else:
raise ValueError('Unknown type tuple:', types)
a_single_len = self.get_single_len(a_single_name)
b_single_len = self.get_single_len(b_single_name)
dbl_name = a_single_name + '-' + b_single_name
dbl_pdb = pdb_utils.read_pdb(
self.pdb_dir + '/doubles/' + dbl_name + '.pdb')
dbl_res = pdb_utils.get_residues(dbl_pdb)
main_res = deposit_context.main_res
if term == 'n':
# Displace N term residues (first half of main_res) based on
# linear weights. In the double, start from B module.
#
# main_res: [n ... | ... c]
# disp_w: [1....0]
# dbl: [n ... | ... c] [n ... | ... c]
if b_info.mod_type == 'hub':
# Lift double (in A frame) to hub arm frame with A at the
# arm's tip.
chains = \
self.xdb['modules']['singles'][a_single_name]['chains']
tx_id = chains[a_chain_id]['c'][b_info.mod_name][b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
rot = np.asarray(tx['rot'])
tran = np.asarray(tx['tran'])
else: # Guaranteed to be 'single' thanks to get_node()
# Drop double to B frame.
rot, tran = self.get_drop_tx(a_single_name, b_single_name)
transform_residues(dbl_res, rot, tran)
disp_n = b_single_len // 2
disp_w = [i/disp_n for i in range(1, disp_n + 1)]
main_disp = main_res[:disp_n]
dbl_part = dbl_res[-b_single_len:-b_single_len+disp_n]
disp_w.reverse() # Make it 1 -> 0
elif term == 'c':
# Displace C term residues (second half of main_res) based on
# linear weights. In the double, start from end of A module and go
# backwards.
#
# main_res: [n ... | ... c]
# disp_w: [0....1]
# dbl: [n ... | ... c] [n ... | ... c]
if a_info.mod_type == 'hub':
# Step 1: Drop double to B frame.
rot, tran = self.get_drop_tx(a_single_name, b_single_name)
# Step 2: Lift double (in B frame) to hub arm frame.
chains = self.xdb['modules']['hubs'][a_info.mod_name]['chains']
tx_id = chains[a_chain_id]['c'][b_single_name][b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
hub_rot = np.asarray(tx['rot'])
hub_tran = np.asarray(tx['tran'])
tran = hub_rot.dot(tran) + hub_tran
rot = hub_rot.dot(rot)
transform_residues(dbl_res, rot, tran)
disp_n = a_single_len // 2
disp_w = [i/disp_n for i in range(1, disp_n + 1)]
main_disp = main_res[-disp_n:]
dbl_part = dbl_res[disp_n:disp_n+disp_n]
blend_residues(main_disp, dbl_part, disp_w)
def get_drop_tx(self, a_single_name, b_single_name):
a_chains = self.xdb['modules']['singles'][a_single_name]['chains']
assert len(a_chains) == 1
a_chain_id = list(a_chains.keys())[0]
a_b_chains = a_chains[a_chain_id]['c'][b_single_name]
assert len(a_b_chains) == 1
b_chain_id = list(a_b_chains.keys())[0]
tx_id = a_b_chains[b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
# Inverse tx because dbgen.py computes the tx that takes the
# single B module to part B inside double.
rot = np.transpose(tx['rot'])
tran = rot.dot(-np.asarray(tx['tran']))
return rot, tran
# Returns the number of residues in a single module.
def get_single_len(self, mod_name):
chains = self.xdb['modules']['singles'][mod_name]['chains']
assert len(chains) == 1
chain_id = list(chains.keys())[0]
return chains[chain_id]['n_residues']
def get_mod_info(self, node, chain_id):
mod_type = node['module_type']
mod_name = node['module_name']
# Obtain module residues.
pdb = pdb_utils.read_pdb(self.pdb_dir + '/' + mod_type +
's/' + mod_name + '.pdb')
res = pdb_utils.get_residues(pdb, chain_id)
res_n = len(res)
return ModInfo(mod_type, mod_name, res, res_n)
def deposit_chains(self, network):
chain_iden_gen = decompose_network(network, self.xdb, self.skip_unused)
for chain_iden in chain_iden_gen:
self.deposit_chain(network, chain_iden)
def new_chain(self):
return Bio.PDB.Chain.Chain(self.next_chain_id())
def next_chain_id(self):
cid = str(self.chain_id)
self.chain_id += 1
return cid
def reset_residue_id(self):
self.residue_id = 1
def next_residue_id(self):
rid = self.residue_id
self.residue_id += 1
return rid
def run(self):
self.reset_residue_id()
self.model = Bio.PDB.Model.Model(0)
if self.show_fusion:
print('Note: show_fusion is on')
networks = self.spec['networks']
for nw_name in networks:
print('Processing network \"{}\"'.format(nw_name))
complaint = self.deposit_chains(networks[nw_name])
if complaint:
print('Error: {}'.format(complaint))
exit()
# Create output
sb = Bio.PDB.StructureBuilder.StructureBuilder()
sb.init_structure('0')
structure = sb.get_structure()
structure.add(self.model)
return structure
if __name__ == '__main__':
utils.safe_exec(main)
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/rmsd.py | .py | 3,891 | 106 | #!/usr/bin/env python3
import argparse, sys
from utilities import *
def parse_args(args):
parser = argparse.ArgumentParser(description='Sliding-window RMSD calculator')
parser.add_argument('solution_dir') # Absence of dash denotes mandatory argument
parser.add_argument('minimised_dir')
parser.add_argument('-window_len', type=int, default=300)
parser.add_argument('-overlap_ratio', type=float, default=0.5)
parser.add_argument('-warn_threshold', type=float, default=5.0)
return parser.parse_args(args)
def main(test_args=None):
args = parse_args(sys.argv[1:] if test_args is None else test_args)
if args.window_len <= 0:
raise ValueError('Invalid window_len: must be greater than 0')
if args.overlap_ratio < 0 or args.overlap_ratio > 1.0:
raise ValueError('Invalid overlap_ratio: must be between 0 and 1.0')
if args.warn_threshold <= 0:
raise ValueError('Invalid warn_threshold: must be greater than 0')
overlap = int(args.overlap_ratio * args.window_len)
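# Note: despite its name, 'overlap' is used as the window stride in CA atoms,
# e.g. window_len 300 with overlap_ratio 0.5 advances 150 CAs per window.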
print('Using window length {} and overlap ratio {} (={} CA atoms), warn at {}A'.format(
args.window_len, args.overlap_ratio, overlap, args.warn_threshold))
minimised_files = glob.glob(args.minimised_dir + '/*.pdb')
for i in range(0, len(minimised_files)):
minimised_file = minimised_files[i]
solution_file = args.solution_dir + minimised_file[minimised_file.rfind('/'):].replace('_0001.pdb', '.pdb')
minimised_pdb = read_pdb(minimised_file)
solution_pdb = read_pdb(solution_file)
minimised_CAs = []
solution_CAs = []
for a in minimised_pdb.get_atoms():
if a.id == 'CA':
minimised_CAs.append(a)
for a in solution_pdb.get_atoms():
if a.id == 'CA':
solution_CAs.append(a)
# Superimpose the two structures before comparing
si = Bio.PDB.Superimposer()
si.set_atoms(minimised_CAs, solution_CAs)
rot, tran = si.rotran
solution_pdb.transform(rot, tran)
minimised_CA_coords = []
solution_CA_coords = []
for a in minimised_pdb.get_atoms():
if a.id == 'CA':
minimised_CA_coords.append(a.coord)
for a in solution_pdb.get_atoms():
if a.id == 'CA':
solution_CA_coords.append(a.coord)
n_minimised_CAs = len(minimised_CA_coords)
if n_minimised_CAs != len(solution_CA_coords):
print('{}: Fatal! Numbers of CAs differ... Solution: {}, Minimised: {}'.format(
solution_file, len(solution_CA_coords), n_minimised_CAs))
else:
stats = {
'min': float('inf'),
'avg': float('nan'),
'max': -float('inf')
}
sum_win_rmsd = 0.0
start_index = 0
window_count = 0
while start_index + args.window_len - 1 < n_minimised_CAs:
sumD = 0.0
for i in range(start_index, start_index+args.window_len):
sumD += np.linalg.norm(minimised_CA_coords[i] - solution_CA_coords[i], 2)
winRmsd = np.sqrt(sumD / args.window_len)
if winRmsd < stats['min']:
stats['min'] = winRmsd
if winRmsd > stats['max']:
stats['max'] = winRmsd
sum_win_rmsd += winRmsd
window_count += 1
start_index += overlap
stats['avg'] = sum_win_rmsd / window_count
print('{:20} avg: {:10} min {:10} max {:10}'.format(
solution_file, stats['avg'], stats['min'], stats['max']))
if stats['max'] > args.warn_threshold:
print('Warning: max window RMSD of {} exceeded!'.format(args.warn_threshold))
if __name__ == '__main__':
safe_exec(main)
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/PlotPerf.py | .py | 4,274 | 183 | #!/usr/bin/env python
import ElfinUtils
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
def main():
# Note: inf length ~22
machines = [
'IvyBridge\nZoo\n24-core\n2.7Ghz',
'KNL\nZoo\n64-core\n1.3Ghz',
'JetsonTX1\nZoo\n4-core\n1.9Ghz',
'Core i7\nMacbookPro\n4-core\n2.6Ghz',
'SandyBridge\nBC3\n16-core\n2.6Ghz',
'SandyBridge\nSwan\n8-core\n2.6Ghz'
]
# Intel 2s12c1t IvyBridge Xeon E5-2697 v2 2.7Ghz
ivy1t = 26240.0
ivy24t = 1344.0
ivyCores = 24
ivyInfTime = 38633 #score 8255
# Intel 1s64c4t Xeon Phi Kights Landing 1.3Ghz
knl1t = 128642.0
knl64t = 2479.0
knlCores = 64
knlInfTime = 58713 #score 8255
# nVidia 4s1c1t Jetson TX1 1.9Ghz
jet1t = 59776.0
jet4t = 14929.0
jetCores = 4
jetInfTime = 6*60000+56401 #score 8682
# MBP Intel 1s4c2t IvyBridge Core i7 2.6Ghz
mbp1t = 25204.0
mbp4t = 8000.0
mbpCores = 4
mbpInfTime = 4*60000+58061 #score 8696
# BC3 Intel 2s8c1t SandyBridge 2.6Ghz
bc1t = 27211.0
bc16t = 1905.0
bcCores = 16
bcInfTime = 48027 #score 9215
# Swan Intel 1s8c2t SandyBridge 2.6Ghz
swa1t = 28566.0
swa8t = 4313.0
swaCores = 8
swaInfTime = 1*60000+47078 #score 10433
speedUps = [
ivy1t/ivy24t,
knl1t/knl64t,
jet1t/jet4t,
mbp1t/mbp4t,
bc1t/bc16t,
swa1t/swa8t
]
cores = [
ivyCores,
knlCores,
jetCores,
mbpCores,
bcCores,
swaCores
]
times = [ t / 1000.0 for t in [
ivyInfTime,
knlInfTime,
jetInfTime,
mbpInfTime,
bcInfTime,
swaInfTime
]]
speedUpsPerCore = [s/c for (s,c) in zip(speedUps, cores)]
N = len(speedUpsPerCore)
ind = np.arange(N)
width = 0.7
hOffsetPerc = 0.01
fontSize = 16
axesArea = [0.1, 0.15, .8, .8]
figSize = (24, 9)
transparent = True
fig1 = plt.figure(figsize=figSize)
ax1 = plt.axes(axesArea, frameon=True)
# Beautiful color switching code thanks to user1839053's SO post at
# http://stackoverflow.com/questions/4971269/how-to-pick-a-new-color-for-each-plotted-line-within-a-figure-in-matplotlib
bars = ax1.bar(ind, speedUpsPerCore, width)
color=iter(cm.rainbow(np.linspace(0,1,N)))
for b in bars:
b.set_color(next(color))
ax1.set_ylabel('Normalised OpenMP Speedup', fontsize=fontSize+5)
plt.xticks(ind, machines)
ax1.tick_params(axis='both', which='minor', labelsize=fontSize)
ax1.tick_params(axis='both', which='major', labelsize=fontSize)
rects1 = ax1.patches
labels1 = [('(' + str(round(s, 1)) + 'x)') for s in speedUps]
hOffset = max(speedUpsPerCore) * hOffsetPerc
for rect, label in zip(rects1, labels1):
height = rect.get_height() + hOffset
ax1.text(rect.get_x() + rect.get_width()/2, height, label, ha='center', va='bottom', fontsize=fontSize)
fig1.show()
plt.savefig('OpenMP.png', transparent=transparent)
# Hijack
machines = [
'Intel i7\n3720\nMacbookPro\n4-core\n2.6Ghz',
'Intel i7\n4790\nZoostorm\n4-core\n2.6Ghz',
'Intel Xeon\nE5-2697 v2\n(Zoo)\n24-core\n2.7Ghz',
'Intel Xeon\nPhi 7210\n(Zoo)\n64-core\n1.3Ghz',
'Intel Xeon\nE5-2670\n(BC3)\n16-core\n2.6Ghz',
'Intel Xeon\nE5-2670\n(Swan)\n8-core\n2.6Ghz',
'Intel Xeon\nE5-2640\n(Bluegem)\n16-core\n2.6Ghz',
'Nvidia Tesla\nK40m\n(Zoo)\n4.3TFlops',
'Nvidia GTX\n980 Ti\n(Zoo)\n5.6TFlops',
'Nvidia GTX\n1080 Ti\n(Zoo)\n10.6TFlops'
]
times = [
1110.3,
779.2,
209.9,
269.7,
261.5,
448.2,
293.8,
234.8,
227.1,
167.7
]
N = len(times)
ind = np.arange(N)
# Plot time
fig2 = plt.figure(figsize=figSize)
ax2 = plt.axes(axesArea, frameon=True)
bars = ax2.bar(ind, times, width)
color=iter(cm.rainbow(np.linspace(0,1,N)))
for b in bars:
b.set_color(next(color))
ax2.set_ylabel('Time-to-solution (s)', fontsize=fontSize+5)
plt.xticks(ind, machines)
ax2.tick_params(axis='both', which='minor', labelsize=fontSize)
ax2.tick_params(axis='both', which='major', labelsize=fontSize)
rects2 = ax2.patches
labels2 = [round(t, 1) for t in times]
hOffset = max(times) * hOffsetPerc
for rect, label in zip(rects2, labels2):
height = rect.get_height() + hOffset
ax2.text(rect.get_x() + rect.get_width()/2, height, label, ha='center', va='bottom', fontsize=fontSize)
fig2.show()
plt.savefig('TimeToSol.png', transparent=transparent)
input()
if __name__ == '__main__':
ElfinUtils.safeExec(main) | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/RMSDStatRosetta.py | .py | 718 | 27 | #!/usr/bin/env python
import glob, sys
from ElfinUtils import *
### Rosetta overall score based... deprecated in favour of windowed RMSD (RMSDStat.py)
if(len(sys.argv) < 2):
print './RMSDStat.py <scoreDir>'
exit()
scoreDir = sys.argv[1]
files = glob.glob(scoreDir + '/*_comp.sc')
nFiles = len(files)
rmsds = []
for i in range(0, nFiles):
with open(files[i], 'r') as file:
line = file.read().split('\n')[-2]
rmsdStr = line.split(' ')[-2]
print '{} RMSD: {}'.format(files[i], rmsdStr)
rmsds.append(float(rmsdStr))
maxRmsd = max(rmsds)
print 'Average: {}, Min: {}, Max: {}'.format(sum(rmsds)/nFiles, min(rmsds), maxRmsd)
if(maxRmsd > 5.0):
print 'WARNING: One or more molecules exceed 5A RMSD!' | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/AnalyseComplexity.py | .py | 1,664 | 70 | #!/usr/bin/env python
import ElfinUtils
import json
import argparse
import numpy as np
from decimal import Decimal
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def main():
ap = argparse.ArgumentParser(description='Compute the number of combinations for a given MMC protein length');
ap.add_argument('--xdbFile', default='./resources/xdb.json')
ap.add_argument('--length', type=int, default=21)
globals().update(vars(ap.parse_args()))
print xdbFile
with open(xdbFile, 'r') as file:
xdb = json.load(file)
dd = xdb['doublesData']
singleNames = xdb['singlesData'].keys()
dim = len(singleNames)
adjMat = np.zeros([dim, dim])
for pdk in dd.keys():
i1 = singleNames.index(pdk)
for pdkk in dd[pdk].keys():
i2 = singleNames.index(pdkk)
adjMat[i1][i2] = 1
mmcYs = []
for l in xrange(1, length):
nCombs = Decimal(np.sum(np.linalg.matrix_power(adjMat, l)))
mmcYs.append(Decimal(nCombs))
# print 'L={}, NC={}'.format(l+1, nCombs)
# A typical repeat module is ~100 AA
Xs = np.asarray(range(2, length + 1))
Xs = [x*100 for x in Xs]
mmcYs = np.asarray(mmcYs)
aaYs = np.power(20.0, np.asarray(Xs, dtype=np.float64))
fig, ax1 = plt.subplots()
ax1.set_xlabel('Design Length/AA')
# ax2 = ax1.twinx()
ElfinUtils.pauseCode()
ax1.plot(Xs[:len(aaYs)], aaYs, label='AA')
ax1.set_ylabel('No. Combs (log scale)')
ax1.set_yscale('log')
ax1.plot(Xs, mmcYs, label='MMC')
# xTickIds = np.arange(3, len(Xs) + 1, 5)
# xTickIds = np.insert(xTickIds, 0, 0)
# plt.xticks(xTickIds+2, [Xs[xtId] for xtId in xTickIds])
plt.legend()
plt.show()
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/GenBench.py | .py | 6,343 | 192 | #!/usr/bin/env python
import Bio.PDB, json
import numpy, random, string, math, codecs
from collections import OrderedDict
from time import gmtime, strftime
import sys, os
import argparse
haveCmd = False
try:
from pymol import cmd
cmd.reinitialize()
haveCmd = True
except ImportError:
print 'Could not import pymol cmd. Not running as pymol plugin...'
# Python only accepts dynamic loading with absolute path
# Need a better way of doing this
elfinDir = '{}/src/elfin/'.format(os.getenv("HOME"))
elfinPyLibDir = elfinDir + '/src/Python/'
import imp
ElfinUtils = imp.load_source('ElfinUtils', elfinPyLibDir + '/ElfinUtils.py')
def main():
ap = argparse.ArgumentParser(description='Generate Grid Search configurations');
ap.add_argument('outdir', default='bm/')
ap.add_argument('num', type=int)
ap.add_argument('--length', type=int, default=10)
ap.add_argument('--maxRetries', type=int, default=-1)
ap.add_argument('--dbFile', default=elfinDir + 'resources/xdb.json')
ap.add_argument('--singlesDir', default='./resources/pdb_aligned/singles/')
ap.add_argument('--doublesDir', default='./resources/pdb_aligned/doubles/')
ap.add_argument('--radiusType', default='maxHeavy')
args = ap.parse_args()
bg = BenchmarkGenerator(args.dbFile,
args.doublesDir,
args.singlesDir,
args.outdir,
args.radiusType)
ElfinUtils.safeExec(bg.run, args.num, args.length, args.maxRetries)
class BenchmarkGenerator:
def __init__(self,
dbFile,
doublesDir,
singlesDir,
outDir,
collisionMeasure):
def makeSelf():
with open(dbFile, 'r') as openFile:
self.xDB = json.load(openFile)
for k, v in self.xDB.iteritems():
setattr(self, k, v)
self.nonTerms = []
for k, v in self.singlesData.iteritems():
if(v['linkCount'] > 1):
self.nonTerms.append(k)
print('DB has {} non-terminal nodes'.format(len(self.nonTerms)))
self.doublesDir = doublesDir
self.singlesDir = singlesDir
self.outDir = outDir
self.bmarks = []
# Collision measure is the radius type used to check collision
assert collisionMeasure in ElfinUtils.RADII_TYPES
self.collisionMeasure = collisionMeasure
ElfinUtils.safeExec(makeSelf)
def chooseNextNode(self, nodes, shape):
lastNode = nodes[-1]
links = self.pairsData[lastNode].keys();
collide = True
while(collide):
newNodeId = random.randint(0, len(links) - 1)
newNode = links[newNodeId]
collide = ElfinUtils.checkCollision(self.xDB, self.collisionMeasure, nodes, newNode, shape)
if collide:
links.remove(newNode)
if len(links) == 0:
print 'Stopped because all links lead to collision'
print 'Available links: {}'.format(
[str(k) for k in self.pairsData[lastNode].keys()])
raise UserWarning('Generation could not continue')
return newNode
def gen(self, chainLen):
nodes = []
# Step 1: Pick starting single from non-terminal nodes
nNonTerms = len(self.nonTerms)
nodes.append(self.nonTerms[random.randint(0, nNonTerms-1)])
# Shape (array of CoMs) starts from origin
coms = numpy.zeros(shape=(1,3), dtype='float64')
# Main structure generation loop
# Keep adding a next node from any node until either
# specified length is reached
for i in xrange(0, chainLen - 1):
lastNode = nodes[i]
newNode = self.chooseNextNode(nodes, coms)
nodes.append(newNode)
rel = self.pairsData[lastNode][newNode]
coms = numpy.append(coms, [rel['comB']], axis=0)
coms = numpy.dot(coms, numpy.asarray(rel['rot'])) + rel['tran']
# Move display/print/postprocess to after construction succeeded
# Makes generation faster
motherPdb, _ = ElfinUtils.makePdbFromNodes(
self.xDB,
nodes,
elfinDir + self.doublesDir,
elfinDir + self.singlesDir
)
if haveCmd:
tmpFile = './elfin.tmp'
ElfinUtils.savePdb(motherPdb, tmpFile)
cmd.load(tmpFile, str(i) + '-' + pairName)
cmd.hide('everything', 'all')
cmd.show('cartoon', 'all')
cmd.reset()
cmd.util.cbc()
self.bmarks.append({
'pdb': motherPdb,
'data': OrderedDict([
('nodes', nodes),
('coms', coms.tolist())
])
})
def run(self, nBmarks, chainLen, maxRetries):
count = 0
retries = 0
# Outer retry loop
while count < nBmarks:
print 'Attempt #{}'.format(retries)
try:
for i in xrange(count, nBmarks):
print('Generating #{}/{}'.format(i+1, nBmarks))
self.gen(chainLen)
count = count + 1
# if all fine then retry is reset
retries = 0
except UserWarning as uw:
print 'Warning: {}'.format(uw)
if maxRetries != -1 and retries >= maxRetries:
ElfinUtils.pauseCode()
print 'Warning: Maximum retries reached - stopping'
break
else:
retries = retries + 1
# When done trying, dump metadata and PDB
print 'Dumping output to {}'.format(self.outDir)
bmNameLen = 4
for bm in self.bmarks:
bmName = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(bmNameLen))
outFile = self.outDir + '/' + bmName
json.dump(bm['data'],
open(outFile + '.json', 'w'),
separators=(',', ':'),
ensure_ascii=False,
indent=4)
ElfinUtils.savePdb(bm['pdb'], outFile + '.pdb')
main()
| Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/Json2Csv.py | .py | 679 | 31 | #!/usr/bin/env python
import glob, json, sys
from utilities import *
### Converts spec JSON files into pure CoMs in CSV form ###
if len(sys.argv) < 2:
print './Json2Csv.py <sourceDir> <outDir=sourceDir>'
exit()
sourceDir = sys.argv[1]
outDir = sourceDir
if len(sys.argv) >= 3:
outDir = sys.argv[2]
jsonFiles = glob.glob(sourceDir + '/*.json')
for i in range(0, len(jsonFiles)):
inFile = jsonFiles[i]
outFile = outDir + inFile[inFile.rfind('/'):].replace('.json', '.csv')
inSpec = readJSON(inFile)
outStr = ''
for com in inSpec['coms']:
outStr += '{} {} {}\n'.format(com[0], com[1], com[2])
with open(outFile, 'w') as of:
of.write(outStr)
print 'Done' | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/GridSearchParams.py | .py | 2,642 | 93 | # This file contains versioned grid search parameter data
def getGSParams(gsVersion):
if gsVersion == 1:
return getGSParamsV1()
if gsVersion == 2:
return getGSParamsV2()
print 'Unknown Grid Search version: {}'.format(gsVersion)
def getGSParamsV2():
# Define the grid
chromoLenDevs = [0.2]
gaPopSizes = [int(100e3)]
gaIters = [int(1e3)]
gaSurviveRates = [0.02, 0.03, 0.05]
gaCrossRates = [0.2, 0.4, 0.6]
gaPointMutateRates = []
gaLimbMutateRates = []
# Create 3 configs - for 3 different benchmarks shapes
# using the same config
bmNames = ['6vjex8d', '9y8hxgo', 'j0m06n4']
# PM and LM rates depend on Cross rates
pmRatios = (0.3, 0.5, 0.70)
for cr in gaCrossRates:
# Each remaining portion after cross rates
# generate 3 ratios of RM and LM
rem = 0.9999 - cr
for pmRatio in pmRatios:
(pm, lm) = (pmRatio * rem, (0.9999 - pmRatio) * rem)
gaPointMutateRates.append(pm)
gaLimbMutateRates.append(lm)
nRuns = len(chromoLenDevs) * len(gaPopSizes) * len(gaIters) * \
len(gaSurviveRates) * len(gaCrossRates) * len(gaPointMutateRates) * \
len(bmNames)
return {
'chromoLenDevs': chromoLenDevs,
'gaPopSizes': gaPopSizes,
'gaIters': gaIters,
'gaSurviveRates': gaSurviveRates,
'gaCrossRates': gaCrossRates,
'gaPointMutateRates': gaPointMutateRates,
'gaLimbMutateRates': gaLimbMutateRates,
'bmNames': bmNames,
'pmRatios': pmRatios,
'nRuns': nRuns
}
def getGSParamsV1():
# Define the grid
chromoLenDevs = [0.1, 0.2, 0.3]
gaPopSizes = [int(100e3)]
gaIters = [int(1e3)]
gaSurviveRates = [0.005, 0.01, 0.02]
gaCrossRates = [0.3, 0.5, 0.7]
gaPointMutateRates = []
gaLimbMutateRates = []
# Create 3 configs - for 3 different benchmarks shapes
# using the same config
bmNames = ['6vjex8d', '9y8hxgo', 'j0m06n4']
# PM and LM rates depend on Cross rates
pmRatios = (0.25, 0.5, 0.75)
for cr in gaCrossRates:
# Each remaining portion after cross rates
# generate 3 ratios of RM and LM
rem = 0.9999 - cr
for pmRatio in pmRatios:
(pm, lm) = (pmRatio * rem, (0.9999 - pmRatio) * rem)
gaPointMutateRates.append(pm)
gaLimbMutateRates.append(lm)
nRuns = len(chromoLenDevs) * len(gaPopSizes) * len(gaIters) * \
len(gaSurviveRates) * len(gaCrossRates) * len(gaPointMutateRates) * \
len(bmNames)
return {
'chromoLenDevs': chromoLenDevs,
'gaPopSizes': gaPopSizes,
'gaIters': gaIters,
'gaSurviveRates': gaSurviveRates,
'gaCrossRates': gaCrossRates,
'gaPointMutateRates': gaPointMutateRates,
'gaLimbMutateRates': gaLimbMutateRates,
'bmNames': bmNames,
'pmRatios': pmRatios,
'nRuns': nRuns
} | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/pymol_helpers/gen_transform.py | .py | 1,025 | 43 | #!/usr/bin/env python
import argparse, sys
import numpy as np
from utilities import *
def main():
ap = argparse.ArgumentParser(description='Generate Pymol Transform');
ap.add_argument('double_name')
ap.add_argument('--xdb_path', default='./resources/xdb.json')
ap.add_argument('--double_dir', default='./resources/pdb_aligned/doubles/')
args = ap.parse_args()
xDB = readJSON(args.xdb_path)
reset_string = \
('delete {}\n' + \
'load {}\n' + \
'hide everything, {}\n' + \
'show cartoon, {}\n') \
.format(
args.double_name,
args.double_dir + '/' + args.double_name + '.pdb',
args.double_name,
args.double_name
)
single_names = args.double_name.split('-')
rel = xDB['doublesData'][single_names[0]][single_names[1]]
pymol_rot_mat_str = gen_pymol_txm(rel['rot'], rel['tran'])
tx_string = \
'cmd.transform_selection({}, {}, homogenous=0)' \
.format(
"\'" + args.double_name + "\'",
pymol_rot_mat_str
)
print(reset_string)
print(tx_string)
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | elfinpy/obsolete/pymol_helpers/gen_spec_sol_rot.py | .py | 1,130 | 41 | #!/usr/bin/env python
import argparse, sys
import numpy as np
import kabsch
from utilities import *
def get_spec_sol_rot(spec_file, sol_csv):
if spec_file.rfind('.csv') != -1:
spec_pts = read_csv_points(spec_file)
elif spec_file.rfind('.json') != -1:
with open(spec_file, 'r') as file:
spec_pts = np.asarray(json.load(file)['coms'])
else:
raise ValueError('Unknown spec file format')
solPts = read_csv_points(sol_csv)
# Centre both pts to their ends
centred_spec = spec_pts - spec_pts[-1]
centred_sol = solPts - solPts[-1]
# Equalise sample points
sol_up_pts = upsample(centred_spec, centred_sol)
sol_up_pts = sol_up_pts - sol_up_pts[-1]
# Find Kabsch rotation for solution -> spec
rot = kabsch.kabsch(sol_up_pts, centred_spec)
return gen_pymol_txm(rot)
def main():
ap = argparse.ArgumentParser(
description='Generate spec to solution rotation string for Pymol')
ap.add_argument('spec_file')
ap.add_argument('sol_file')
args = ap.parse_args()
print(get_spec_sol_rot(args.spec_file, args.sol_file))
if __name__ == '__main__':
main() | Python |
3D | Parmeggiani-Lab/elfin | tests/__init__.py | .py | 0 | 0 | null | Python |
3D | Parmeggiani-Lab/elfin | tests/helper.py | .py | 1,304 | 41 | #!/usr/bin/env python3
import pytest
import importlib
from functools import partial
def _test(module_name=None, error_type=None, module_test_callback=None, assert_callback=None, package_name=None):
if module_name is None or \
error_type is None or \
module_test_callback is None or \
assert_callback is None or \
package_name is None:
raise ValueError('Insufficient arguments supplied to helper._test()')
with pytest.raises(error_type) as e:
full_module_name = '.'.join([package_name, module_name])
module_test_callback(importlib.import_module(full_module_name))
assert assert_callback(e)
def _test_error_str(module_name, error_type=None, error_str_search=None, package_name=None):
_test(
module_name=module_name,
error_type=error_type,
module_test_callback=lambda mod: mod.main(),
assert_callback=lambda re: error_str_search in str(re.value),
package_name=package_name
)
_test_script_main = partial(
_test,
error_type=SystemExit,
module_test_callback=lambda mod: mod.main(['--help']),
assert_callback=lambda se: se.value.code == 0 # --help should return 0
)
_test_non_executable = partial(
_test_error_str,
error_type=RuntimeError,
error_str_search='executed'
) | Python |
3D | Parmeggiani-Lab/elfin | tests/test_pymol_extensions/__init__.py | .py | 135 | 4 | import sys, os
curr_dir = os.getcwd()
sys.path.append(os.path.realpath(os.path.join(curr_dir, 'pymol_scripts'))) # for test scripts | Python |
3D | Parmeggiani-Lab/elfin | tests/test_pymol_extensions/test_mains.py | .py | 744 | 21 | import pytest
import importlib
from functools import partial
from tests import helper
test_package_name = 'extensions'
script_main_test = partial(helper._test_script_main, package_name=test_package_name)
non_executable_test = partial(helper._test_non_executable, package_name=test_package_name)
def test_mains():
non_executable_test('extension_template')
non_executable_test('transform_helper')
non_executable_test('load_all_extensions')
non_executable_test('draw_lines')
non_executable_test('batch_convert')
non_executable_test('extrude_hub_at_single_c')
non_executable_test('extrude_hub_at_single_n')
non_executable_test('extrude_single_at_single_c')
non_executable_test('extrude_single_at_single_n') | Python |
3D | Parmeggiani-Lab/elfin | tests/test_elfinpy/__init__.py | .py | 129 | 4 | import sys, os
curr_dir = os.getcwd()
sys.path.append(os.path.realpath(os.path.join(curr_dir, 'elfinpy'))) # for test scripts | Python |
3D | Parmeggiani-Lab/elfin | tests/test_elfinpy/test_mains.py | .py | 744 | 27 | import pytest
import importlib
from functools import partial
from tests import helper
test_package_name = 'elfinpy'
script_main_test = partial(helper._test_script_main, package_name=test_package_name)
non_executable_test = partial(helper._test_non_executable, package_name=test_package_name)
def test_mains():
# Test python code in order of workflow
script_main_test('template')
non_executable_test('utilities')
non_executable_test('pdb_utilities')
non_executable_test('kabsch')
script_main_test('preprocess')
script_main_test('hubinfo_convert')
script_main_test('dbgen')
script_main_test('stitch')
script_main_test('job_dispatcher')
script_main_test('rmsd')
script_main_test('stat_xdb') | Python |
3D | OpenMS/OpenMS | AGENTS.md | .md | 28,824 | 700 | # OpenMS Agent Notes
This file provides context and instructions for AI coding agents working on OpenMS. It follows the [AGENTS.md](https://agents.md) standard.
## Critical Constraints
**NEVER do these things:**
- Build the project unless explicitly asked (extremely resource-intensive)
- Modify files in `src/openms/extern/` (third-party vendored code)
- Commit secrets, credentials, or `.env` files
- Add `using namespace` or `using std::...` in header files
- Modify the contrib tree or third-party dependencies
- Skip tests when making code changes
## Quick Commands
```bash
# Configure (from OpenMS-build/ directory, adjust paths as needed)
cmake -DCMAKE_BUILD_TYPE=Debug ../OpenMS
# Build everything (includes tests)
cmake --build . -j$(nproc)
# Run all tests
ctest -j$(nproc)
# Run specific test by name pattern
ctest -R FeatureMap -j4
# Run tests with verbose output
ctest -R MyTest -V
# Run style checks
cmake --build . --target test_style
# Regenerate pyOpenMS after changes
rm pyOpenMS/.cpp_extension_generated
cmake --build . --target pyopenms -j4
# Run pyOpenMS tests
ctest -R pyopenms
# Check code formatting
clang-format --dry-run -Werror src/openms/source/MYFILE.cpp
```
## Key Docs in This Repo
- `README.md`, `CONTRIBUTING.md`, `ARCHITECTURE.MD`, `CODE_OF_CONDUCT.md`, `PULL_REQUEST_TEMPLATE.md`.
- `src/pyOpenMS/README.md`, `src/pyOpenMS/README_WRAPPING_NEW_CLASSES`.
- `share/OpenMS/examples/external_code/README.md`, `src/tests/external/README.md`.
- `dockerfiles/README.md`, `cmake/MacOSX/README.md`, `tools/jenkins/README.MD`.
- Doxygen (if built) in `OpenMS-build/doc/html/` including `index.html`, `developer_coding_conventions.html`, `developer_cpp_guide.html`, `developer_how_to_write_tests.html`, `howto_commit_messages.html`, `developer_faq.html`, `developer_tutorial.html`, `install_linux.html`, `install_mac.html`, `install_win.html`, `pyOpenMS.html`.
## Repo Layout
- Default build directory: `OpenMS-build/` (out-of-tree).
- Core C++: `src/openms/`, `src/openms_gui/`, `src/openswathalgo/`, `src/topp/`.
- Tests: `src/tests/class_tests/openms/`, `src/tests/class_tests/openms_gui/`, `src/tests/topp/`.
- pyOpenMS: `src/pyOpenMS/` with `pxds/`, `addons/`, `pyopenms/`, `tests/`.
## Project Stack
- **Language**: C++20, Python 3.9+
- **Build**: CMake 3.24+, out-of-tree builds in `OpenMS-build/`
- **Testing**: CTest, GoogleTest-style macros, pytest for Python
- **Style**: `.clang-format` in repo root, cpplint via `ENABLE_STYLE_TESTING=ON`
- **Platforms**: Linux, macOS (Apple Clang), Windows (MSVC 2019+)
## Repository Layout
```
OpenMS/
├── src/
│ ├── openms/ # Core C++ library
│ │ ├── include/OpenMS/ # Headers (.h)
│ │ └── source/ # Implementation (.cpp)
│ ├── openms_gui/ # Qt-based GUI components
│ ├── openswathalgo/ # OpenSWATH algorithms
│ ├── topp/ # Command-line tools (TOPP)
│ ├── pyOpenMS/ # Python bindings
│ │ ├── pxds/ # .pxd declarations for autowrap
│ │ ├── addons/ # Python-only method additions
│ │ └── tests/ # Python tests
│ └── tests/
│ ├── class_tests/openms/source/ # C++ unit tests
│ └── topp/ # TOPP integration tests
├── cmake/ # CMake modules
├── doc/ # Documentation source
└── share/OpenMS/ # Runtime data files
```
## Build and Install
- **CMake minimum**: 3.21; **C++ standard**: C++20
- Out-of-tree build expected in `OpenMS-build/`; build in place for development (install prefixes are for system installs).
- Use `CMAKE_BUILD_TYPE=Debug` for development to keep assertions/pre/post-conditions.
- Dependencies via distro packages or the contrib tree; set `OPENMS_CONTRIB_LIBS` and `CMAKE_PREFIX_PATH` as needed (Qt, contrib).
- pyOpenMS build deps: `src/pyOpenMS/requirements_bld.txt`; enable with `-DPYOPENMS=ON` and optional `-DPY_NUM_THREADS`/`-DPY_NUM_MODULES`.
- Style checks: `ENABLE_STYLE_TESTING=ON` runs cpplint at `src/tests/coding/cpplint.py`.
**Required dependencies:**
- XercesC, Boost (date_time, regex, iostreams), Eigen3 (3.4.0+), libSVM (2.91+), COIN-OR or GLPK, ZLIB, BZip2, Qt6 (6.1.0+)
**Optional:** HDF5 (`-DWITH_HDF5=ON`), Apache Arrow/Parquet (`-DWITH_PARQUET=ON`)
## Platform-Specific Build Gotchas
### Windows
- **MSYS/MinGW NOT supported** — must use Visual Studio environment
- **MSVC 2019+ required** (version 1920+); AddressSanitizer needs this minimum
- **64-bit only**; use Visual Studio generator (not Ninja/Make)
- **Keep build paths short** to avoid path length issues
- **Never mix Release/Debug libraries** — causes stack corruption and segfaults
- Compiler must match between contrib and OpenMS builds
- HDF5 forced to static linking on MSVC
- OpenMP requires `/openmp:experimental` flag (set automatically) for SIMD support
- Nested OpenMP (`MT_ENABLE_NESTED_OPENMP`) defaults to OFF on MSVC
### macOS
- **Apple Clang (Xcode) required**; Homebrew for dependencies
- **AppleClang >= 15.0.0**: Requires `-ld_classic` linker flag (set automatically)
- Remove older Qt versions if they interfere with Qt6
- Qt6 requires `PrintSupport` component for platform plugin
- `QT_QPA_PLATFORM=minimal` helps for headless/remote GUI runs
- Code signing and notarization required for distribution (see `cmake/MacOSX/README.md`)
- `fix_dependencies.rb` script fixes RPATH for relocatable binaries
### Linux
- Package manager preferred for dependencies; contrib is fallback
- `-fPIC` flag applied automatically for shared library compatibility
- `QT_QPA_PLATFORM=minimal` for headless GUI test runs
- STL debug mode (`_GLIBCXX_DEBUG`) only supported with GCC in Debug builds
- System libraries (libc, libstdc++, libpthread, etc.) excluded from packaging
### Qt6 Issues
- **Minimum version**: 6.1.0
- If Qt6 not found: `-DCMAKE_PREFIX_PATH='<path_to_Qt6_lib_parent>'`
- WebEngineWidgets optional; if missing, JavaScript views disabled in TOPPView (warning only)
- Required components: Core, Network; GUI components need Widgets, Svg, OpenGLWidgets
### Boost from Homebrew Warning
- Statically linked Boost from system installs (brew) NOT fully supported
- Issue: Boost CMake doesn't expose transitive dependencies as targets
- Workaround: Use `-DBOOST_USE_STATIC=OFF` for shared libraries OR build Boost with contrib
### Common CMake Issues
- **CMAKE_SIZEOF_VOID_P bug**: Variable vanishes on CMake version updates → delete `CMakeFiles/` and `CMakeCache.txt`, rerun cmake
- **Eigen3 version detection**: Build system handles CMake's version checking quirks with Eigen3 4.0+ automatically
## Testing
- Unit/class tests: `src/tests/class_tests/<lib>/source/`, add to `executables.cmake`; data in `src/tests/class_tests/libs/data/` (prefix files with class name).
- TOPP tests: add to `src/tests/topp/CMakeLists.txt`, data in `src/tests/topp/`.
- GUI tests: `src/tests/class_tests/openms_gui/source/` (Qt TestLib).
- Build `all`/`ALL_BUILD` to include tests and `FuzzyDiff` (TOPP tests depend on it).
- Use `NEW_TMP_FILE` for each output file in tests; avoid side effects in comparison macros.
- Run with `ctest`, use `-R` for subset, `-V/-VV` for verbosity, `-C` for multi-config generators.
- Use `FuzzyDiff` for numeric comparisons; keep test data small; use whitelist for unstable lines.
- Test templates: `tools/create_test.php` (requires `make xml`).
- `START_SECTION` macro pitfalls: wrap template methods with 2+ arguments in parentheses.
- pyOpenMS tests: `ctest -R pyopenms` or `pytest` with `PYTHONPATH=/path/to/OpenMS-build/pyOpenMS` (run outside the source tree to avoid shadowing).
**Unit test example:**
```cpp
// src/tests/class_tests/openms/source/MyClass_test.cpp
#include <OpenMS/CONCEPT/ClassTest.h>
#include <OpenMS/PATH/TO/MyClass.h>
START_TEST(MyClass, "$Id$")
MyClass* ptr = nullptr;
START_SECTION(MyClass())
ptr = new MyClass();
TEST_NOT_EQUAL(ptr, nullptr)
END_SECTION
START_SECTION(void process(const MSSpectrum&))
MSSpectrum spec;
spec.push_back(Peak1D(100.0, 1000.0));
ptr->process(spec);
TEST_EQUAL(spec.size(), 1)
END_SECTION
delete ptr;
END_TEST
```
## Coding Conventions
- Indentation: 2 spaces for C++/headers, 4 spaces for Python/Cython (PEP 8); no tabs; Unix line endings.
- Spacing: after keywords (`if`, `for`) and around binary operators.
- Braces: opening/closing braces align; use braces even for single-line blocks (trivial one-liners may stay single-line).
- File names: class name matches file name; one class per file; always pair `.h` with `.cpp`.
- Templates: use `_impl.h` only when needed; `.h` must not include `_impl.h`.
- Names: classes/types/namespaces in PascalCase; methods lowerCamel; variables snake_case; private/protected members end with `_`.
- Enums and macros uppercase with underscores; avoid the preprocessor; prefer `enum class`.
- Parameters: lower_case with underscores; document ranges/units.
- File extensions: lowercase, except `ML`/`XML` and `mzData`.
- Use OpenMS primitive types from `OpenMS/CONCEPT/Types.h`.
- No `using namespace` or `using std::...` in headers; allowed in `.cpp`.
- Follow Rule-of-0 or Rule-of-6.
- Accessors: get/set pairs for protected/private members; no reference getters for primitive types.
- Exceptions: derive from `Exception::Base`; throw with file/line/`OPENMS_PRETTY_FUNCTION`; catch by reference; document possible exceptions (see the sketch after this list).
- Doxygen: `@brief` + blank line + details; use `@defgroup/@ingroup`; use `.doxygen` files for free-standing docs; `@todo` includes assignee name.
- Comments: at least ~5% of code, use `//` style, plain English describing the next few lines.
- Each file preamble contains the `$Maintainer:$` marker.
- Formatting: use `./.clang-format` in supporting IDEs.
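**Exception handling example:** a minimal sketch of the exception convention above. `setThreshold` is a hypothetical function; verify the exact `Exception` subclasses and constructor signatures against `OpenMS/CONCEPT/Exception.h`:
```cpp
#include <OpenMS/CONCEPT/Exception.h>
#include <iostream>

using namespace OpenMS; // fine in a .cpp, never in a header

void setThreshold(double t) // hypothetical example function
{
  if (t < 0.0)
  {
    // Throw with the file/line/function triple, as the convention requires.
    throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                      "threshold must be >= 0");
  }
}

int main()
{
  try
  {
    setThreshold(-1.0);
  }
  catch (const Exception::Base& e) // catch by reference
  {
    std::cerr << e.what() << '\n';
  }
}
```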
## Doxygen Documentation Style
OpenMS uses `/** */` block comments with `@` tags (not `\` backslash). `@brief` is **required** (not auto-generated from first line).
**File header (required in every .h file):**
```cpp
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
// --------------------------------------------------------------------------
// $Maintainer: Your Name $
// $Authors: Original Author, Your Name $
// --------------------------------------------------------------------------
```
**Class documentation:**
```cpp
/**
@brief An algorithm to decharge features (i.e. as found by FeatureFinder).

Detailed description goes here after a blank line.
Can span multiple lines.
@htmlinclude OpenMS_FeatureDeconvolution.parameters
@ingroup Analysis
*/
class OPENMS_DLLAPI FeatureDeconvolution : public DefaultParamHandler
```
**Method documentation with parameters:**
```cpp
/**
@brief Compute a zero-charge feature map from charged features.

Find putative ChargePairs, then score them and hand over to ILP.
@param[in] fm_in Input feature-map
@param[out] fm_out Output feature-map (sorted by position)
@param[in,out] cons Consensus map modified in place
@return The number of charge groups found
@throws Exception::MissingInformation if RT/MZ data missing
@throws Exception::InvalidParameter if threshold < 0
@note The original sequence is saved as MetaValue.
@warning This method modifies fm_out in place.
*/
Size compute(const FeatureMap& fm_in, FeatureMap& fm_out, ConsensusMap& cons);
```
**Parameter direction tags:** Always use `[in]`, `[out]`, or `[in,out]` for all parameters.
**Grouping constructors/destructors:**
```cpp
/** @name Constructors and Destructors
*/
//@{
/// Default constructor
FeatureDeconvolution();
/// Copy constructor
FeatureDeconvolution(const FeatureDeconvolution& source);
/// Destructor
~FeatureDeconvolution() override;
//@}
```
**Simple inline documentation:** Use `///` for brief single-line docs:
```cpp
/// Fragment mass tolerance for spectrum comparisons
double fragment_mass_tolerance_;
/// Is fragment mass tolerance given in ppm (or Da)?
bool fragment_tolerance_ppm_;
```
**Common Doxygen tags:**
| Tag | Usage |
|-----|-------|
| `@brief` | Required first line summary |
| `@param[in/out]` | Parameter with direction |
| `@return` | Return value description |
| `@throws` / `@exception` | Exceptions that may be thrown |
| `@note` | Important notes |
| `@warning` | Warnings about usage |
| `@ingroup` | Category grouping (e.g., `Analysis_ID`) |
| `@see` | Cross-references |
| `@todo` | Include assignee name: `@todo JohnDoe fix this` |
**Project structure (excerpt):**
```
OpenMS/
├── src/
│   ├── openms/                         # Core C++ library
│   │   ├── include/OpenMS/             # Headers (.h)
│   │   └── source/                     # Implementation (.cpp)
│   ├── openms_gui/                     # Qt-based GUI components
│   ├── openswathalgo/                  # OpenSWATH algorithms
│   ├── topp/                           # Command-line tools (TOPP)
│   ├── pyOpenMS/                       # Python bindings
│   │   ├── pxds/                       # .pxd declarations for autowrap
│   │   ├── addons/                     # Python-only method additions
│   │   └── tests/                      # Python tests
│   └── tests/
│       ├── class_tests/openms/source/  # C++ unit tests
│       └── topp/                       # TOPP integration tests
├── cmake/                              # CMake modules
├── doc/                                # Documentation source
└── share/OpenMS/                       # Runtime data files
```
## Code Style (with Examples)
**Naming conventions:**
```cpp
// Classes/Types/Namespaces: PascalCase
class FeatureMap;
namespace OpenMS { }
// Methods: lowerCamelCase
void processSpectrum();
// Variables: snake_case
int peak_count = 0;
// Private/protected members: trailing underscore
double intensity_;
// Enums/macros: UPPER_SNAKE_CASE
enum class Status { RUNNING, COMPLETE };
#define OPENMS_DLLAPI
```
**File structure:**
```cpp
// MyClass.h - Header file
#pragma once

#include <OpenMS/KERNEL/MSSpectrum.h>

namespace OpenMS
{
  class OPENMS_DLLAPI MyClass // Export macro required
  {
  public:
    MyClass();
    void process(const MSSpectrum& spectrum);

  private:
    double threshold_; // Trailing underscore
  };
}

// MyClass.cpp - Implementation file
#include <OpenMS/PATH/TO/MyClass.h>

using namespace OpenMS; // OK in .cpp files

MyClass::MyClass() : threshold_(0.0) {}

void MyClass::process(const MSSpectrum& spectrum)
{
  // 2-space indentation, braces on own lines
  if (spectrum.empty())
  {
    OPENMS_LOG_WARN << "Empty spectrum\n"; // Use logging macros
    return;
  }
}
```
## C++ Guide (OpenMS-specific)
- `OPENMS_DLLAPI` on all non-template exported classes/structs/functions/vars; not on templates; include in friend operator declarations.
- Use OpenMS logging macros and `OpenMS::LogStream`; avoid `std::cout/err` directly (see the sketch after this list).
- Use `ProgressLogger` in tools for progress reporting.
- Avoid `std::endl` for performance; prefer `\n`.
- Prefer `OpenMS::String` for numeric formatting and parsing (precision and speed).
- Use `Size`/`SignedSize` for STL `.size()` values.
- Avoid pointers; prefer references.
- Prefer forward declarations in headers; include only base class headers, non-pointer members, and templates.
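A short sketch combining several of these rules (logging macro, `Size` for container sizes, `'\n'` instead of `std::endl`); the function itself is illustrative:
```cpp
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/KERNEL/MSSpectrum.h>

using namespace OpenMS;

void reportSuspiciousPeaks(const MSSpectrum& spec)
{
  for (Size i = 0; i < spec.size(); ++i) // Size matches STL .size()
  {
    if (spec[i].getIntensity() <= 0.0)
    {
      OPENMS_LOG_WARN << "non-positive peak at index " << i << '\n';
    }
  }
}
```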
## TOPP Tool Development
- Add new tool source (e.g., `src/topp/<Tool>.cpp`) and register in `src/topp/executables.cmake`.
- Register tool in `src/openms/source/APPLICATIONS/ToolHandler.cpp` to generate Doxygen help output.
- Define parameters in `registerOptionsAndFlags_()`; read with `getStringOption_` and related helpers (see the skeleton after this list).
- Document the tool and add to `doc/doxygen/public/TOPP.doxygen` where applicable.
- Add TOPP tests in `src/tests/topp/CMakeLists.txt`.
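A minimal sketch of a TOPP tool skeleton; the tool name, description, and the single `in` parameter are placeholders:
```cpp
#include <OpenMS/APPLICATIONS/TOPPBase.h>

using namespace OpenMS;

class TOPPMyTool : public TOPPBase
{
public:
  TOPPMyTool() : TOPPBase("MyTool", "One-line description of MyTool") {}

protected:
  void registerOptionsAndFlags_() override
  {
    registerInputFile_("in", "<file>", "", "Input file");
    setValidFormats_("in", {"mzML"});
  }

  ExitCodes main_(int, const char**) override
  {
    String in = getStringOption_("in");
    // ... actual processing goes here ...
    return EXECUTION_OK;
  }
};

int main(int argc, const char** argv)
{
  TOPPMyTool tool;
  return tool.main(argc, argv);
}
```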
## pyOpenMS Wrapping
- Autowrap reads `.pxd` in `src/pyOpenMS/pxds/` and generates `pyopenms/pyopenms.pyx` -> `pyopenms.cpp` -> module.
- Addons in `src/pyOpenMS/addons/` inject Python-only methods (indent only; no `cdef class`).
- Keep `.pxd` signatures in sync with C++ APIs; update or remove `wrap-ignore` when wrapping changes.
- Always declare default and copy constructors in `.pxd`; use `cimport`, not Python `import`.
- For non-inheriting classes use `cdef cppclass ClassName:` with no base.
- Autowrap hints: `wrap-ignore`, `wrap-as`, `wrap-iter-begin/end`, `wrap-instances`, `wrap-attach`, `wrap-upper-limit`, `wrap-inherits`.
- Avoid custom `__init__` unless required; it overrides autowrap dispatchers.
- Use snake_case for Python-facing names and DataFrame columns.
- Do not add Python-only methods to `.pxd`; use addons or `_dataframes.py` wrappers.
- DataFrame pattern: `get_data_dict()` in addon returns numpy arrays; `get_df()` in `src/pyOpenMS/pyopenms/_dataframes.py` wraps with pandas.
- Type converters: implement in `src/pyOpenMS/converters/special_autowrap_conversionproviders.py`, register in `src/pyOpenMS/converters/__init__.py`.
- Gotchas: autowrap returns Python strings; do not `.decode()`. Avoid `cdef` for autowrap string returns. Avoid `cdef` typed variables for autowrap return values inside `def` methods; use Python type checks. Keep addons minimal; avoid redundant aliases.
- **CRITICAL: `wrap-doc:` formatting** - The autowrap parser (`PXDParser.py`) requires exactly `#  ` (hash + two spaces) at the start of every documentation continuation line. Changing this to `#` (bare hash) or `# ` (hash + one space) will break parsing:
```python
# CORRECT format:
void myMethod() except + nogil
    # wrap-doc:
    #  This is the documentation.   <- hash + 2 spaces + text
    #                               <- hash + 2 spaces (for blank lines)
    #  :param x: Description        <- hash + 2 spaces + text

# WRONG format (will cause ValueError):
void myMethod() except + nogil
    # wrap-doc:
    # This is the documentation.    <- only 1 space after hash - BREAKS!
    #                               <- just a bare hash - BREAKS!
```
The parser checks each continuation line against the `#  ` prefix (via `line.startswith`) to validate and continue parsing. Any deviation causes immediate failure.
- Regenerate after addon changes:
```bash
rm OpenMS-build/pyOpenMS/.cpp_extension_generated
cmake --build OpenMS-build --target pyopenms -j4
```
## Change-Impact Checklist
- New C++ class: add `.h`/`.cpp`, Doxygen docs, class test, `OPENMS_DLLAPI`, register in CMake lists.
- C++ API change: update `.pxd`/addons, pyOpenMS tests, and relevant docs; tag commits with `API` as needed.
- New/changed TOPP tool: register in `src/topp/executables.cmake` and `ToolHandler.cpp`, add docs, add TOPP tests and data.
- Parameter or I/O change: update tool docs/CTD, tests, and `CHANGELOG`; use `PARAM`/`IO` commit tags.
- File format change: update `FileHandler::NamesOfTypes[]`, schemas/validators, and tests.
## Contribution Workflow and Commit Messages
- Development follows Gitflow; use forks and open PRs against `develop`.
- Commit format: `[TAG1,TAG2] short summary` (<=120 chars, <=80 preferred), blank line, longer description, and `Fixes #N`/`Closes #N` when applicable.
- Commit tags: NOP, DOC, COMMENT, API, INTERNAL, FEATURE, FIX, TEST, FORMAT, PARAM, IO, LOG, GUI, RESOURCE, BUILD.
- PR checklist: update `AUTHORS` and `CHANGELOG`, run/extend tests, update pyOpenMS bindings when needed.
- Minimize pushes on open PRs (CI is heavy).
- Run `tools/checker.php` and/or `ENABLE_STYLE_TESTING` for local checks.
**Commit message example:**
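The following illustrates the format; tags, summary, and issue number are placeholders:
```
[FIX,TEST] Fix off-by-one in spectrum boundary check

Correct the boundary handling in the peak lookup and extend the
corresponding class test; the old code skipped the last peak.

Fixes #123
```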
**Formatting rules (C++):**
- 2 spaces indentation, no tabs (Python/Cython uses 4 spaces per PEP 8)
- Unix line endings (LF)
- Braces on their own lines, aligned
- Space after keywords (`if`, `for`, `while`)
- Always use braces, even for single-line blocks
## Testing Patterns
**Unit test structure:** see the unit test example in the Testing section above.
**Adding tests:**
1. Create `src/tests/class_tests/openms/source/ClassName_test.cpp`
2. Add to `src/tests/class_tests/openms/executables.cmake`
3. Use `NEW_TMP_FILE(filename)` for temp output files
4. Test data goes in `src/tests/class_tests/libs/data/` (prefix with class name)
## Git Workflow
**Commit message format:**
```
[TAG1,TAG2] Short summary (<=80 chars preferred)

Longer description explaining why, not what.

Fixes #123
```
## Debugging and Profiling
- Linux: use `ldd` to inspect shared libs; `nm -C` for symbols; `perf`/`hotspot` for profiling.
- Windows: Dependency Walker or `dumpbin /DEPENDENTS` and `dumpbin /EXPORTS`.
- Memory checks: AddressSanitizer or valgrind with `tools/valgrind/openms_external.supp`.
**Valid tags:** `NOP`, `DOC`, `COMMENT`, `API`, `INTERNAL`, `FEATURE`, `FIX`, `TEST`, `FORMAT`, `PARAM`, `IO`, `LOG`, `GUI`, `RESOURCE`, `BUILD`
**Branch workflow:**
- Fork the repo, branch from `develop`
- Open PRs against `develop` (Gitflow)
- Minimize pushes on open PRs (CI is resource-heavy)
## Change Impact Checklist
When you change | Also update
----------------|------------
C++ class (new) | Add `.h`/`.cpp`, Doxygen docs, class test, `OPENMS_DLLAPI`, CMake registration
C++ API | `.pxd` files, pyOpenMS addons, tests, docs
TOPP tool (new) | `src/topp/executables.cmake`, `ToolHandler.cpp`, docs, TOPP tests
Parameters | Tool docs, CTD, tests, `CHANGELOG`
File format | `FileHandler::NamesOfTypes[]`, schemas, tests
## pyOpenMS Wrapping (Quick Reference)
**Key files:**
- `.pxd` declarations: `src/pyOpenMS/pxds/`
- Python addons: `src/pyOpenMS/addons/`
- Type converters: `src/pyOpenMS/converters/`
**Common patterns:**
```python
# In addons/MyClass.pyx - inject Python-only methods
def get_df(self):
    """Return pandas DataFrame."""
    import pandas as pd
    return pd.DataFrame(self.get_data_dict())
```
**Gotchas:**
- Always declare default and copy constructors in `.pxd`
- Use `cimport`, not Python `import` for Cython imports
- Autowrap returns Python strings; do NOT call `.decode()`
- Use snake_case for Python-facing names
## Verification Commands
After making changes, verify with:
```bash
# Check formatting
clang-format --dry-run -Werror <changed-files>
# Run relevant tests
ctest -R <ClassName> -V
# For pyOpenMS changes
cd OpenMS-build && ctest -R pyopenms -V
# Style check
cmake --build OpenMS-build --target test_style
```
## Key Documentation
**In-repo docs:**
- `README.md` - Project overview
- `CONTRIBUTING.md` - Contribution guidelines
- `src/pyOpenMS/README.md` - pyOpenMS development
- `src/pyOpenMS/README_WRAPPING_NEW_CLASSES` - Wrapping guide
**Online resources:**
- [OpenMS Documentation](https://openms.readthedocs.io/en/latest)
- [pyOpenMS API Reference](https://pyopenms.readthedocs.io/en/latest/apidocs/index.html)
- [Developer Coding Conventions](https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_coding_conventions.html)
- [How to Write Tests](https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_how_to_write_tests.html)
- [GitHub Wiki](https://github.com/OpenMS/OpenMS/wiki)
## Common Gotchas
1. **Template methods with 2+ args in tests**: Wrap in parentheses for `START_SECTION`
2. **GUI tests need display**: Set `QT_QPA_PLATFORM=minimal` for headless runs
3. **pyOpenMS tests shadow imports**: Run from outside source tree with `PYTHONPATH` set
4. **Windows paths**: Keep build paths short; use 64-bit only
5. **FuzzyDiff for numeric tests**: Build `all`/`ALL_BUILD` to include it
## Debugging Tips
```bash
# Linux: inspect shared libraries
ldd /path/to/binary
nm -C /path/to/library.so | grep MySymbol
# Memory checking
valgrind --suppressions=tools/valgrind/openms_external.supp ./MyTest
# Profile with perf
perf record -g ./MyTool input.mzML
perf report
```
## External Projects and Examples
- Example external CMake project: `share/OpenMS/examples/external_code/`.
- External test project: `src/tests/external/`.
- Use the same compiler/generator as OpenMS; set `OPENMS_CONTRIB_LIBS` and `OpenMS_DIR` when configuring.
## CI, Packaging, and Containers
- CI runs in GitHub Actions; CDash collects nightly results.
- Jenkins packaging uses `tools/jenkins/os_compiler_matrix.tsv` (edit only if needed).
- PR commands/labels: `/reformat`, label `NoJenkins`, comment `rebuild jenkins`.
- Container images: see `dockerfiles/README.md` and GHCR packages.
- macOS code signing/notarization: see `cmake/MacOSX/README.md`.
## Documentation Links (External)
### OpenMS Docs
- http://www.openms.org/
- http://www.OpenMS.de
- https://openms.readthedocs.io/en/latest
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/index.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/nightly/html/index.html
- http://www.openms.de/current_doxygen/html/
- https://pyopenms.readthedocs.io/en/latest/index.html
- https://pyopenms.readthedocs.io/en/latest/apidocs/index.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/OpenMSInstaller/
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/OpenMSInstaller/nightly/
- http://www.psidev.info/
### Doxygen Developer Pages (release/latest)
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_tutorial.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_coding_conventions.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_cpp_guide.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_how_to_write_tests.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/howto_commit_messages.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/developer_faq.html
### Developer Workflow and Contribution
- https://github.com/OpenMS/OpenMS
- https://github.com/OpenMS/OpenMS/issues
- https://github.com/OpenMS/OpenMS/wiki#-for-developers
- https://github.com/OpenMS/OpenMS/wiki/Coding-conventions
- https://github.com/OpenMS/OpenMS/wiki/Write-tests
- https://github.com/OpenMS/OpenMS/wiki/pyOpenMS#wrap
- https://pyopenms.readthedocs.io/en/latest/wrap_classes.html
- https://openms.readthedocs.io/en/latest/contribute-to-openms/pull-request-checklist.html
- https://github.com/OpenMS/OpenMS/wiki/Pull-Request-Checklist
- https://github.com/OpenMS/OpenMS/wiki/Preparation-of-a-new-OpenMS-release#release_developer
- http://nvie.com/posts/a-successful-git-branching-model/
- https://help.github.com/articles/fork-a-repo
- https://help.github.com/articles/syncing-a-fork
- https://help.github.com/articles/using-pull-requests
- http://cdash.seqan.de/index.php?project=OpenMS
- https://github.com/OpenMS/OpenMS/tags
### Build/Install Guides
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/install_linux.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/install_mac.html
- https://abibuilder.cs.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/install_win.html
- https://github.com/OpenMS/THIRDPARTY
- https://pkgs.org/search/?q=openms
- http://manpages.ubuntu.com/manpages/hardy/man1/ctest.1.html
- http://www.cmake.org
- http://cmake.org/
- https://visualstudio.microsoft.com/de/downloads/?q=build+tools
- http://www.7-zip.org/
- https://www.qt.io/download
- https://wiki.qt.io/Building_Qt_6_from_Git
- https://developer.apple.com/xcode/
- https://brew.sh/
- http://www.OpenMS.de/download/
### Coding and Tooling
- https://clang.llvm.org/docs/ClangFormat.html
- https://devblogs.microsoft.com/cppblog/clangformat-support-in-visual-studio-2017-15-7-preview-1/
- https://git-scm.com/
- http://www.doxygen.org
- http://www.doxygen.org/index.html
- https://llvm.org/builds/
- https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2471?view=msvc-170
- https://github.com/OpenMS/autowrap/blob/master/docs/README.md
### Testing and Profiling Tools
- https://openms.readthedocs.io/en/latest/docs/topp/adding-new-tool-to-topp.html#how-do-I-add-a-new-TOPP-test
- https://perf.wiki.kernel.org/index.php/Main_Page
- https://github.com/KDAB/hotspot
- http://sandsoftwaresound.net/perf/perf-tutorial-hot-spots/
- http://valgrind.org/docs/manual/
- https://github.com/cbielow/wintime
- http://www.dependencywalker.com/
### Packaging and Containers
- https://github.com/orgs/OpenMS/packages
- https://github.com/OpenMS/NSIS
- http://miktex.org/
- http://www.ghostscript.com/
- http://www.graphviz.org
| Markdown |
3D | OpenMS/OpenMS | CONTRIBUTING.md | .md | 3,337 | 43 | Help us to make OpenMS better and become part of the OpenMS open-source community.
This document is displayed because you either opened an issue or you want to provide your code as a pull request for inclusion into OpenMS. Please take a look at the appropriate section below to find some details on how we handle this process.
When interacting with other developers, users or anyone else from our community, please adhere to
[our CODE OF CONDUCT](https://github.com/OpenMS/OpenMS/blob/develop/CODE_OF_CONDUCT.md)
# Reporting an Issue:
You most likely came here to:
- report bugs or annoyances
- pose questions
- point out missing documentation
- request new features
To do so, open a [new issue](https://github.com/OpenMS/OpenMS/issues/new/choose) describing the bug, your question, etc.
If you found a bug, e.g. an OpenMS tool crashes during data processing, it is essential to provide some basic information:
- the OpenMS version you are running
- the platform you are running OpenMS on (Windows 10, ...)
- how you installed OpenMS (e.g., from within KNIME, binary installers, self-compiled)
- a description on how to reproduce the bug
- relevant tool output (e.g., error messages)
- data to reproduce the bug (if possible as a GitHub gist; other platforms like Dropbox or Google Drive links also work. If you can't share the data publicly, please indicate this and we will contact you in private.)
If you are an official OpenMS team member:
- label your issue using GitHub labels (e.g., question, defect) that indicate the type of issue and which components of OpenMS (blue labels) are affected. The severity is usually assigned by OpenMS maintainers and used internally, e.g., to indicate if a bug is a blocker for a new release.
# Opening a Pull Request
Before getting started we recommend taking a look at our GitHub-Wiki: https://github.com/OpenMS/OpenMS/wiki#-for-developers
Before you open the pull request, make sure you
- adhere to [our coding conventions](https://github.com/OpenMS/OpenMS/wiki/Coding-conventions)
- have [unit tests and functional tests](https://github.com/OpenMS/OpenMS/wiki/Write-tests) - see also [this example](https://github.com/OpenMS/OpenMS/blob/develop/src/tests/class_tests/openms/source/MSNumpressCoder_test.cpp)
- have [proper documentation](https://github.com/OpenMS/OpenMS/wiki/Coding-conventions#doxygen) - see also [this example](https://github.com/OpenMS/OpenMS/blob/develop/src/openms/include/OpenMS/FORMAT/MSNumpressCoder.h)
- have [Python bindings](https://pyopenms.readthedocs.io/en/latest/wrap_classes.html) - see also [these instructions](https://github.com/OpenMS/OpenMS/wiki/pyOpenMS#wrap) and [this example](https://github.com/OpenMS/OpenMS/blob/develop/src/pyOpenMS/pxds/MSNumpressCoder.pxd)
A core developer will review your changes to the main development branch (develop) and approve them (or ask for modifications). You may indicate the preferred reviewer(s) by adding links to them in a comment section (e.g., @cbielow @hendrikweisser @hroest @jpfeuffer @timosachsenberg)
Also consider getting in contact with the core developers early. They might provide additional guidance and valuable information on how your specific aim is achieved. This might give you a head start in, for example, developing novel tools or algorithms.
Happy coding!
| Markdown |
3D | OpenMS/OpenMS | ARCHITECTURE.MD | .MD | 15,270 | 319 | # OpenMS Architecture Documentation
## Introduction
OpenMS is an open‐source software framework dedicated to mass spectrometry data analysis in proteomics, metabolomics, and related fields. It provides a robust, flexible, and extensible platform that supports rapid algorithm development, data processing, and the creation of custom analysis pipelines. This document outlines the core architectural components, design principles, and future directions of the OpenMS project.
## Architecture Principles
- **Modularity & Extensibility:** Built as a collection of loosely coupled libraries and tools, OpenMS allows developers to easily integrate new algorithms and extend existing functionality.
- **Performance & Scalability:** Written in modern C++ and optimized for parallel processing, OpenMS is designed to handle large datasets efficiently.
- **Portability:** OpenMS runs natively on Windows, Linux, and macOS.
- **Robustness & Reliability:** Extensive error handling, automated unit and integration tests, and continuous integration (e.g., via CDash) ensure high-quality, reproducible analyses.
- **User-Centric Design:** With a suite of command-line tools (TOPP), graphical interfaces (TOPPView, TOPPAS), and Python bindings (pyOpenMS), OpenMS caters to both developers and end users.
## Components
### 1. OpenMS Library
- **Kernel Classes**: Essential data structures for representing mass spectrometry data (see the sketch after this list)
- `Peak1D`: Basic data structure for mass-to-charge ratio (m/z) and intensity pairs
- `ChromatogramPeak`: Data structure for retention time and intensity pairs
- `MSSpectrum`: Represents a single mass spectrum
- `MSExperiment`: Container for multiple spectra and chromatograms
- `Feature`: Represents a detected feature in a mass spectrum, characterized by properties like m/z, retention time, and intensity.
- `FeatureMap`: Container for features detected in a sample
- `ConsensusMap`: Container for features identified across multiple samples or conditions
- `PeptideIdentification`: Container for peptide identification results
- `ProteinIdentification`: Container for protein identification results
- ...
- **Algorithms:**
Implements key algorithms for signal processing, feature detection, quantification, peptide identification, protein inference, alignment, and others.
- **File Handling & Format Support:**
- Robust support for standard MS file formats (mzML, mzXML, mgf, mzIdentML, TraML, ...).
- Utilities for file conversion and handling compressed data are also provided.
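A minimal sketch of how the kernel classes compose; the values are illustrative:
```cpp
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/Peak1D.h>

using namespace OpenMS;

int main()
{
  MSSpectrum spec;                         // a single mass spectrum
  spec.push_back(Peak1D(445.12, 12000.0)); // (m/z, intensity) pair
  spec.setRT(1523.4);                      // retention time in seconds

  MSExperiment exp;                        // container for spectra and chromatograms
  exp.addSpectrum(spec);
  return 0;
}
```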
### 2. TOPP Tools (The OpenMS Pipeline Tools)
TOPP is a comprehensive suite of command-line tools built on top of the OpenMS library that can be easily integrated into workflow systems.
- **Tool Architecture**:
- Common interface for all tools, ensuring consistency.
- Standardized parameter handling system for configuring tool behavior.
- Logging and error reporting mechanisms for debugging and monitoring.
- Progress monitoring to provide feedback during long-running processes.
- Input/output file handling to manage data flow between tools.
### 3. User Interfaces
- **Graphical Tools:**
- **TOPPView:** A dedicated viewer for raw spectra, chromatograms, and identification results.
- Raw data visualization
- Spectrum and chromatogram viewing
- 2D/3D data representation
- Identification result visualization
- Interactive data analysis
### 4. Scripting
- **Scripting & API: pyOpenMS**
- Python bindings that expose core functionalities
- Rapid prototyping of algorithms in Python
- Integration with other scientific Python libraries (e.g., NumPy, SciPy)
- Development of custom data processing workflows
- Interactive data analysis and visualization in Jupyter notebooks
#### pyOpenMS Generation with Autowrap
The Python bindings for OpenMS are generated through a specialized process:
- **Autowrap Tool**: A custom Python tool developed specifically for generating OpenMS bindings. ([README](https://github.com/OpenMS/autowrap/blob/master/docs/README.md)).
- **Declaration Process**:
- Developers declare classes and functions to be exposed to Python in `.pxd` files located in the `pxds/` directory
- These declarations specify the function signatures and class hierarchies
- **Code Generation Workflow**:
1. Autowrap reads the declarations and automatically generates wrapping code
2. Manual wrapper code can be added in the `addons/` folder for functionality that can't be wrapped automatically
3. The output is a `pyopenms.pyx` file in the `pyopenms/` directory
4. Cython translates this `.pyx` file to C++ code (`pyopenms.cpp`)
5. A C++ compiler compiles this code into a Python module importable via `import pyopenms`
- **Customization Points**:
- Special annotations like `wrap-ignore`, `wrap-as`, `wrap-iter-begin`, etc. can be used to customize the wrapping process
- Allows for fine-grained control over how C++ classes and methods are exposed to Python
- **Complex C++ Features**: Autowrap handles many advanced C++ features automatically (with some caveats):
- Operator overloading (e.g., implementing Python's `__add__`, `__getitem__`)
- Complex template instantiations
- Multiple inheritance scenarios
- Static methods and variables
#### Manual Extensions with PYX Files
While autowrap handles most standard C++ to Python wrapping automatically, manual PYX files (in the `addons/` directory) enable advanced functionality that goes beyond simple wrapping:
- **Python-Specific Enhancements**:
- Adding Pythonic interfaces to C++ classes
- Custom exception handling and translation between C++ and Python exceptions
- Implementing Python protocols (e.g., iteration, context managers)
- **Performance Optimizations**:
- Custom memory management
- Efficient data conversion between NumPy arrays and OpenMS data structures
- Specialized handling for large datasets
- **Integration with Python Ecosystem**:
- Converters to/from pandas DataFrames
- Matplotlib visualization helpers
- Integration with scientific Python libraries
- **Extension Methods**: Adding methods that exist only in Python and not in the C++ API, such as:
- Convenience functions for common operations
- Python-specific utility methods
- Simplified interfaces for complex C++ functionality
## Data Flow and Processing Workflow
### Data Processing Pipeline
```mermaid
flowchart TD
A[Raw Data] --> B[Open file format]
B --> C["Analysis Pipeline (e.g., set of TOPP Tools, pyOpenMS Scripts)"]
C --> D[Export]
D --> E[Visualization and Downstream Processing]
```
### Component Interactions
OpenMS components interact in a layered architecture:
```mermaid
flowchart TD
A[Raw Data Files] --> B[File Handlers]
B --> C[Data Structures]
C --> D[Algorithms]
D --> E[Tools/Applications]
E --> F[Workflow Systems]
G[Python Scripts] --> C
G --> D
G --> E
H[GUI Applications] --> B
H --> C
H --> D
```
- **Data Structures Layer**: Core data types like MSSpectrum, Feature, etc.
- **Algorithms Layer**: Processing algorithms implemented in C++
- **Tools Layer**: Command-line tools for specific tasks
- **Workflow Layer**: Pipeline systems for connecting multiple tools
## Extension Points and Customization
The OpenMS architecture is designed for extensibility at multiple levels:
### Algorithm Integration
- Well-defined interfaces enable the addition of new data processing and analysis algorithms
- Abstract base classes with consistent interfaces allow algorithm plugins
- Template-based design patterns for algorithm families (e.g., feature finders, peak pickers)
### File Format Support
- File handlers and extension hooks allow support for additional file formats
- Adapter pattern for integrating external libraries and parsers
- Extensible import/export framework with plugin architecture
### Tool Development
- Developers can build new TOPP tools by subclassing common base classes
- The `TOPPBase` class provides standardized parameter handling, logging, and I/O capabilities
- Integration with the standardized parameter and logging systems
- Tools can be built as standalone applications or library components
### Workflow Customization
- Users can combine OpenMS tools with custom scripts (e.g., via pyOpenMS)
- Support for workflow systems like KNIME and Galaxy
- Parameter files (INI format) for tool configuration and chaining
- TOPPAS workflow editor for visual pipeline construction
### Python Extensions
- Development of new algorithms in Python using pyOpenMS
- Integration with the Python scientific ecosystem
- Custom data processing pipelines in Python notebooks
## Build System
- **CMake Configuration:**
OpenMS uses a CMake-based build system that ensures platform-independent compilation and simplifies dependency management.
- Handles dependencies through a combination of system-provided libraries and vendored code
- Configures build options for different platforms and compilers
- Manages the generation of pyOpenMS bindings when the `-DPYOPENMS=ON` option is set
- Uses vcpkg for consistent dependency management across platforms
- **Automated Testing & CI:**
A comprehensive suite of unit tests, integration tests, and nightly builds (e.g., via CDash) maintains code quality and facilitates rapid detection of issues.
- Tests are built and run through CMake/CTest
- Continuous integration workflows automate testing across different platforms
- Test coverage reports help identify untested code regions
## Parallel Processing and Performance Optimization
### Parallelization Mechanisms
- **OpenMP Integration**: OpenMS uses OpenMP as the primary parallelization backend (see the sketch after this list)
- Parallel algorithms for computationally intensive tasks
- Configurable thread utilization based on available resources
- Thread-safe data structures for parallel processing
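A schematic sketch of a typical per-spectrum OpenMP loop, assuming independent iterations; the per-spectrum work is a placeholder:
```cpp
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/KERNEL/MSExperiment.h>

using namespace OpenMS;

void processAll(MSExperiment& exp)
{
  // OpenMP requires a signed loop variable; iterations must not share state
  #pragma omp parallel for
  for (SignedSize i = 0; i < static_cast<SignedSize>(exp.size()); ++i)
  {
    exp[i].sortByPosition(); // placeholder per-spectrum work
  }
}
```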
### Performance Considerations
- **Memory Management**: Optimized data structures for handling large datasets
- **Algorithm Complexity**: Carefully designed algorithms to minimize computational complexity
- **I/O Optimization**: Efficient file handling for large mass spectrometry data files
- **Vectorization**: Use of SIMD instructions where applicable for compute-intensive operations
## Documentation Standards and Resources
### Code Documentation
- **API Documentation**: Doxygen-generated comprehensive API documentation
- **Inline Comments**: Structured in-code documentation following consistent standards
- **Coding Standards**: Style guidelines ensuring clarity and maintainability
- **Example Code**: Annotated examples demonstrating key functionality
### User Documentation
- **User Guides**: Comprehensive guides for different user levels (beginners to experts)
- **Tutorials**: Step-by-step tutorials for common tasks and workflows
- **Example Workflows**: Pre-configured workflows for typical analysis scenarios
- **FAQ and Troubleshooting**: Common issues and their solutions
### Developer Resources
- **Architecture Documentation**: High-level design documents (like this one)
- **Contribution Guidelines**: Clear guidelines for code contributions
- **Development Workflows**: Processes for feature development, bug fixing, and code review
- **Design Patterns**: Documentation of common patterns used throughout the codebase
## Testing Strategy
OpenMS employs a comprehensive testing strategy with tests organized in different directories based on their purpose:
### Class Tests
- Located in `src/tests/class_tests/openms/`
- Unit tests for OpenMS library components
- Tests individual classes and functions for correctness
- Follows a structured naming convention (e.g., `ClassName_test.cpp` tests the `ClassName` class)
### GUI Tests
- Located in `src/tests/class_tests/openms_gui/`
- Tests specifically for graphical components
- Includes tests for visualization tools and user interfaces
### TOPP Tests
- Located in `src/tests/topp/`
- Tests for The OpenMS Pipeline Tools
- Integration tests that ensure command-line tools function correctly
- Tests include input files and expected output files for comparison
### Python Tests (pyOpenMS)
- Located in `src/pyOpenMS/tests/`
- Contains several types of tests:
- `unittests/`: Tests for individual Python wrapped classes
- `integration_tests/`: Tests combining multiple components
- `memoryleaktests/`: Tests ensuring no memory leaks occur in Python bindings
### Continuous Integration
- Tests are run automatically via CI pipelines
- Ensures code quality across different platforms and environments
- Prevents regression bugs when new code is introduced
## Deployment and Distribution
OpenMS and its Python bindings (pyOpenMS) are distributed through several channels to suit different use cases and environments:
- **Standalone Installers:**
- For Windows and macOS, standalone installers (e.g., drag-and-drop installers for macOS) are provided for releases.
- **Bioconda:**
- OpenMS (and its library component `libopenms`) as well as its tools are available via the Bioconda channel.
- The Python bindings, **pyOpenMS**, are available on Bioconda or pypi.
- **Container Images:**
- Docker and Singularity container images are provided through the OpenMS GitHub Container Registry as well as via BioContainers. These images bundle the OpenMS library, executables, and pyOpenMS so that users can deploy OpenMS in cloud or HPC environments with minimal setup.
## Maintenance Guidelines
- **Code Reviews:**
All contributions undergo peer review to maintain quality and adherence to coding standards.
- **Release Management:**
Flexible release cycles with defined versioning protocols
- **Issue Tracking:**
Community-reported issues and feature requests are managed via GitHub issues
## Project Structure
The OpenMS codebase follows a structured organization:
```
OpenMS/
├── cmake/ # CMake build system files and modules
├── contrib/ # Third-party dependencies
├── doc/ # Documentation files
├── share/ # Shared resources
├── src/ # Source code
│ ├── openms/ # Core OpenMS library
│ ├── openms_gui/ # GUI components
│ ├── openswathalgo/ # OpenSWATH algorithms
│ ├── pyOpenMS/ # Python bindings
│ │ ├── pxds/ # Class declarations for autowrap
│ │ ├── addons/ # Manual wrapping code
│ │ ├── pyopenms/ # Generated Python module
│ │ └── tests/ # Tests for Python bindings
│ ├── tests/ # C++ tests
│ │ ├── class_tests/ # Unit tests for classes
│ │ │ ├── openms/ # Tests for core library
│ │ │ └── openms_gui/ # Tests for GUI components
│ │ └── topp/ # Tests for TOPP tools
│ └── topp/ # TOPP tools implementation
└── tools/ # Development tools and scripts
```
## Contributing
For detailed information about contributing to OpenMS, please refer to the CONTRIBUTING.md file in the repository.
| Markdown |
3D | OpenMS/OpenMS | CODE_OF_CONDUCT.md | .md | 6,242 | 74 | ### OpenMS Code of Conduct
#### Code of Conduct (Summary View)
Below is a summary of the OpenMS Code of Conduct.
We are dedicated to providing a welcoming and supportive environment for all people, regardless of background or identity. By participating in this community, participants accept to abide by OpenMS' Code of Conduct and accept the procedures by which any Code of Conduct incidents are resolved. Any form of behaviour to exclude, intimidate, or cause discomfort is a violation of the Code of Conduct. In order to foster a positive and professional learning environment we encourage the following kinds of behaviours in all platforms and events:
* Use welcoming and inclusive language
* Be respectful of different viewpoints and experiences
* Gracefully accept constructive criticism
* Focus on what is best for the community
* Show courtesy and respect towards other community members
If you believe someone is violating the Code of Conduct, we ask that you report it to the Code of Conduct Committee, who will take the appropriate action to address the situation.
#### Code of Conduct (Detailed View)
Part 1. Introduction
OpenMS is a community-led project. We value the involvement of everyone in the community. We are committed to creating a friendly and respectful place for learning, teaching and contributing. All participants in our events and communications are expected to show respect and courtesy to others.
To make clear what is expected, everyone participating in OpenMS activities is required to conform to the Code of Conduct. This Code of Conduct applies to all spaces managed by OpenMS including, but not limited to, workshops, email lists, and online forums such as GitHub, Slack and Twitter. Workshop hosts are expected to assist with the enforcement of the Code of Conduct.
The OpenMS Code of Conduct Committee is responsible for enforcing the Code of Conduct. It can be contacted by emailing [open-ms-c_o_c@lists.sourceforge.net](mailto:open-ms-c_o_c@lists.sourceforge.net).
All reports will be reviewed by the Code of Conduct Committee and will be kept confidential.
Part 2. OpenMS Code of Conduct
OpenMS is dedicated to providing a welcoming and supportive environment for all people, regardless of background or identity. As such, we do not tolerate behaviour that is disrespectful to our teachers or learners or that excludes, intimidates, or causes discomfort to others. We do not tolerate discrimination or harassment based on characteristics that include, but are not limited to, gender identity and expression, sexual orientation, disability, physical appearance, body size, citizenship, nationality, ethnic or social origin, pregnancy, familial status, veteran status, genetic information, religion or belief (or lack thereof), membership of a national minority, property, age, education, socio-economic status, technical choices, and experience level.
Everyone who participates in OpenMS activities is required to conform to this Code of Conduct. It applies to all spaces managed by OpenMS including, but not limited to, workshops, email lists, and online forums such as GitHub, Gitter and Twitter. Workshop hosts are expected to assist with the enforcement of the Code of Conduct. By participating, participants indicate their acceptance of the procedures by which OpenMS resolves any Code of Conduct incidents, which may include storage and processing of their personal information.
Part 2.1 Expected behaviour
All participants in our events and communications are expected to show respect and courtesy to others. All interactions should be professional regardless of platform: either online or in-person. In order to foster a positive and professional learning environment we encourage the following kinds of behaviours in all OpenMS events and platforms:
* Use welcoming and inclusive language
* Be respectful of different viewpoints and experiences
* Gracefully accept constructive criticism
* Focus on what is best for the community
* Show courtesy and respect towards other community members
Note: See the [four social rules](https://www.recurse.com/manual#sub-sec-social-rules) for further recommendations.
Part 2.2 Unacceptable behaviour
Examples of unacceptable behaviour by participants at any OpenMS event/platform include:
- written or verbal comments which have the effect of excluding people on the basis of membership of any specific group
- causing someone to fear for their safety, such as through stalking, following, or intimidation
- violent threats or language directed against another person
- the display of sexual or violent images
- unwelcome sexual attention
- nonconsensual or unwelcome physical contact
- sustained disruption of talks, events or communications
- insults or put downs
- sexist, racist, homophobic, transphobic, ableist, or exclusionary jokes
- excessive swearing
- incitement to violence, suicide, or self-harm
- continuing to initiate interaction (including photography or recording) with someone after being asked to stop
- publication of private communication without consent
Part 2.3 Consequences of Unacceptable behaviour
Participants who are asked to stop any inappropriate behaviour are expected to comply immediately. This applies to any OpenMS events and platforms, either online or in-person. If a participant engages in behaviour that violates this code of conduct, the organisers may warn the offender, ask them to leave the event or platform (without refund), or engage OpenMS Code of Conduct Committee to investigate the Code of Conduct violation and impose appropriate sanctions.
#### About this Document
This document is adapted from guidelines written by the [The Carpentries Project](https://github.com/carpentries/handbook/blob/master/topic_folders/policies/code-of-conduct.md), which was itself based on the [Django Project](https://www.djangoproject.com/conduct/enforcement-manual/) and [Ada Initiative](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Responding_to_reports) template and the [PyCon 2013 Procedure for Handling Harassment Incidents](https://us.pycon.org/2013/about/code-of-conduct/harassment-incidents/).
[licensed CC BY 3.0](http://creativecommons.org/licenses/by/3.0/)
| Markdown |
3D | OpenMS/OpenMS | PULL_REQUEST_TEMPLATE.md | .md | 1,765 | 35 | ## Description
<!-- Please include a summary of the change and which issue is fixed here. -->
## Checklist
- [ ] Make sure that you are listed in the AUTHORS file
- [ ] Add relevant changes and new features to the CHANGELOG file
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] New and existing unit tests pass locally with my changes
- [ ] Updated or added python bindings for changed or new classes (Tick if no updates were necessary.)
### How can I get additional information on failed tests during CI
<details>
<summary>Click to expand</summary>
If your PR is failing you can check out
- The details of the action statuses at the end of the PR or the "Checks" tab.
- http://cdash.seqan.de/index.php?project=OpenMS and look for your PR. Use the "Show filters" capability on the top right to search for your PR number.
If you click in the column that lists the failed tests you will get detailed error messages.
</details>
### Advanced commands (admins / reviewer only)
<details>
<summary>Click to expand</summary>
- `/reformat` (experimental) applies the clang-format style changes as an additional commit. Note: your branch must have a different name (e.g., yourrepo:feature/XYZ) than the receiving branch (e.g., OpenMS:develop). Otherwise, reformat fails to push.
- setting the label "NoJenkins" will skip tests for this PR on jenkins (saves resources e.g., on edits that do not affect tests)
- commenting with `rebuild jenkins` will retrigger Jenkins-based CI builds
</details>
---
:warning: Note: Once you open a PR, try to minimize the number of *pushes* to it, as every push triggers CI (automated builds and tests) and is rather heavy on our infrastructure (e.g., if several pushes per day are performed).
| Markdown |
3D | OpenMS/OpenMS | LICENSE.md | .md | 1,609 | 30 | BSD 3-Clause License
Copyright (c) 2002-present, OpenMS Inc. -- Eberhard Karls University Tuebingen, ETH Zurich, and Freie Universitaet Berlin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| Markdown |
3D | OpenMS/OpenMS | tools/changelog_helper.sh | .sh | 5,984 | 172 | #!/bin/bash
# Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
# SPDX-License-Identifier: BSD-3-Clause
#
# --------------------------------------------------------------------------
# $Maintainer: Johannes Veit $
# $Authors: Johannes Veit $
# --------------------------------------------------------------------------
############################################################################
#
# Compare the binary folders of two OpenMS releases (old / new) and output:
#
# - Discontinued tools
# - New tools
# - Parameter changes from generated INI files
#
# Worked for me on MacOS comparing release 2.0 (built from source) vs.
# release 1.11.1 (installed using binary installer). Might not run
# out-of-the-box on linux because of differences in sed syntax etc.
#
############################################################################
# check command line arguments
if [[ $# != 2 ]]
then
echo "Usage: $0 <old-release-bin-dir> <new-release-bin-dir>"
exit 1
fi
# directories containing TOPP binaries
BIN_DIR_OLD=$1
BIN_DIR_NEW=$2
# tmp files
SYSTEM_TMP_DIR=/tmp
TMP_DIR=${SYSTEM_TMP_DIR}/OpenMS_changelog_helper
mkdir ${TMP_DIR}
mkdir ${TMP_DIR}/inis
mkdir ${TMP_DIR}/inis/old
mkdir ${TMP_DIR}/inis/new
TMP_FILE_OLD=${TMP_DIR}/tool_list_old.txt
TMP_FILE_NEW=${TMP_DIR}/tool_list_new.txt
TMP_FILE_COMM=${TMP_DIR}/common_tools.txt
# store relevant tool names in tmp files (remove e.g., GUI tools)
ls -la ${BIN_DIR_OLD}/ \
| awk '{print $9}' \
| sort \
| grep -v -e "Tutorial\|TOPPAS\|TOPPView\|INIFileEditor\|SEARCHENGINES\|OpenMSInfo\|GenericWrapper\|SwathWizard\|FLASHDeconvWizard\|Testing" \
| grep -v -e "\.$" \
| grep -v -e "^$" \
> ${TMP_FILE_OLD}
ls -la ${BIN_DIR_NEW}/ \
| awk '{print $9}' \
| sort \
| grep -v -e "Tutorial\|TOPPAS\|TOPPView\|INIFileEditor\|SEARCHENGINES\|OpenMSInfo\|GenericWrapper\|SwathWizard\|FLASHDeconvWizard\|Testing" \
| grep -v -e "\.$" \
| grep -v -e "^$" \
> ${TMP_FILE_NEW}
# find removed tools and new tools
for s in ADDED REMOVED
do
BIN_DIR=${BIN_DIR_NEW}
GREP_CHAR=">"
if [[ $s == "REMOVED" ]]
then
BIN_DIR=${BIN_DIR_OLD}
GREP_CHAR="<"
fi
echo
echo "- $s:"
echo
diff ${TMP_FILE_OLD} ${TMP_FILE_NEW} \
| grep -e "^${GREP_CHAR}" \
| sort \
| while read i
do
TOOL_NAME=$(echo $i | sed -E "s/^. //")
TOOL_DESCR=$(LD_LIBRARY_PATH=${BIN_DIR}/../lib:${LD_LIBRARY_PATH} ${BIN_DIR}/${TOOL_NAME} --help 2>&1 | grep " -- " | head -n 1 | sed -E 's/.* -- (.*)$/\1/' | sed -E 's/\.$//')
if [[ ${TOOL_DESCR} != "" ]]
then
echo " - ${TOOL_NAME} -- ${TOOL_DESCR}"
else
echo " - ${TOOL_NAME}"
fi
done
echo
done
# store names of tools present in both old and new release in tmp file
comm -12 ${TMP_FILE_OLD} ${TMP_FILE_NEW} > ${TMP_FILE_COMM}
# print changed parameters as tab-separated table
echo
echo "- CHANGED PARAMETERS:"
echo
echo -e "Tool name\tAdded/removed\tParameter name\tType\tDefault value\tRestrictions\tSupported formats"
# write ini files for old and new tools, modify them on the fly:
#
# - remove stuff where we're not interested in changes
# - replace parameter names with:this:notation for nested parameters
#
# (=> result is not a valid INI file anymore)
for INI_SUB_DIR in old new
do
BIN_DIR=${BIN_DIR_NEW}
if [[ ${INI_SUB_DIR} == "old" ]]
then
BIN_DIR=${BIN_DIR_OLD}
fi
cat ${TMP_FILE_COMM} | while read t
do
# reset subsection prefix
p=
# generate pseudo ini file
LD_LIBRARY_PATH=${BIN_DIR}/../lib:${LD_LIBRARY_PATH} ${BIN_DIR}/$t -write_ini - \
| grep -v "name=\"version\"" \
| sed -E 's/description="[^"]*"|required="[^"]*"|advanced="[^"]*"//g' \
| sed -E 's/restrictions="[^"]*pyrophospho[^"]*"/ restrictions="..."/' \
| sed -E 's/tags="is_executable"//g' \
| while read l
do
# new NODE -> append subsection to prefix p
echo $l | grep "<NODE" &> /dev/null && p=$p$(echo $l | sed -E 's/.*name="([^"]+)".*/\1/'): && continue
# NODE finished -> remove last subsection from prefix p
echo $l | grep "</NODE" &> /dev/null && p=$(echo $p | sed -E 's/[^:]+:$//') && continue
# otherwise, substitute name -> name:with:subsections
echo $l | sed -E 's/name="([^"]*)"/name="'$p'\1"/'
done \
> ${TMP_DIR}/inis/${INI_SUB_DIR}/$t.pseudo.ini 2> /dev/null
done
done
# sort pseudo ini file
cat ${TMP_FILE_COMM} | while read t
do
sort ${TMP_DIR}/inis/old/$t.pseudo.ini -o ${TMP_DIR}/inis/old/$t.pseudo.ini
sort ${TMP_DIR}/inis/new/$t.pseudo.ini -o ${TMP_DIR}/inis/new/$t.pseudo.ini
done
# compute diffs of pseudo ini files and output tab-separated table of changed parameters
cat ${TMP_FILE_COMM} | while read t
do
diff -d ${TMP_DIR}/inis/old/$t.pseudo.ini ${TMP_DIR}/inis/new/$t.pseudo.ini
done \
| grep "<ITEM" \
| sed -E 's/^<[[:space:]]+/- /' \
| sed -E 's/^>[[:space:]]+/+ /' \
| perl -0777 -pe 's/\n+/\n/g' \
| while read l
do
T_NAME=$(echo $l | sed -E 's/.*name="([^":]*):.*/\1/')
P_ADD_REM=$(echo $l | sed -E 's/^(.).*/\1/')
P_NAME=$(echo $l | sed -E 's/.*name="[^":]+:.:([^"]*)".*/\1/')
P_TYPE=$(echo $l | grep "type=" | sed -E 's/.*type="([^"]*)".*/\1/')
P_VALUE=$(echo $l | grep "value=" | sed -E 's/.*value="([^"]*)".*/\1/')
P_RESTRICTIONS=$(echo $l | grep "restrictions=" | sed -E 's/.*restrictions="([^"]*)".*/\1/')
P_FORMATS=$(echo $l | grep "supported_formats=" | sed -E 's/.*supported_formats="([^"]*)".*/\1/')
echo -e "${T_NAME}\t${P_ADD_REM}\t${P_NAME}\t${P_TYPE}\t${P_VALUE}\t${P_RESTRICTIONS}\t${P_FORMATS}"
done | sort
#cleanup
rm -rf ${TMP_DIR}
| Shell |
3D | OpenMS/OpenMS | tools/PythonExtensionChecker.py | .py | 52,975 | 1,278 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
# SPDX-License-Identifier: BSD-3-Clause
#
# --------------------------------------------------------------------------
# $Maintainer: Hannes Roest$
# $Authors: Hannes Roest$
# --------------------------------------------------------------------------
"""
from __future__ import print_function
import glob, os, sys
import re, time
import argparse
from xml.sax.saxutils import escape as xml_escape
from PythonCheckerLib import parse_pxd_file
from PythonCheckerLib import create_pxd_file_map
# Try non-standard libs
try:
import yaml
import breathe
from Cython.Compiler.Nodes import CEnumDefNode, CppClassNode, CTypeDefNode, CVarDefNode, CImportStatNode, CDefExternNode
from autowrap.PXDParser import CppClassDecl, CTypeDefDecl, MethodOrAttributeDecl, EnumDecl
except ImportError as e:
print ("You need to install a few packages for this library to work")
print ("Please use:")
print (" pip install breathe")
print (" pip install pyyaml")
print (" pip install autowrap")
print (" pip install Cython")
raise e
# Try breathe parser
try:
from breathe.parser.doxygen.compound import parse as doxygen_parse
except ImportError:
print ("importing breathe.parser.doxygen.compound failed, try new API")
from breathe.parser.compound import parse as doxygen_parse
# Matching to match doxygen methods to Cython pxd functions
def handle_member_definition(mdef, pxd_class, cnt):
""" Matches a doxygen member definition (mdef) to a Cython pxd file.
This tries to ensure that all C++ functions are wrapped and have an
equivalent in the Python wrapper.
Parameters
----------
mdef : breathe.parser.compound.memberdefTypeSub
A doxygen entry
pxd_class : autowrap.PXDParser.CppClassDecl
A PXD class file as parsed by autowrap
cnt :
A count object to keep track of how many functions we wrapped
"""
tres = TestResult()
protection = mdef.get_prot() # DoxProtectionKind: public, protected, private, package
kind = mdef.get_kind() # DoxMemberKind: define property event variable typedef enum function signal prototype friend dcop slot
if not protection in "public protected private package".split(" "):
raise Exception("Error; something is wrong")
if not kind in "variable enum function define property event typedef signal prototype friend dcop slot".split(" "):
raise Exception("Error; something is wrong")
# Only match public enums, variables, functions
if protection in "protected private package".split(" "):
tres.setPassed(True)
elif kind in "define property event typedef signal prototype friend dcop slot".split(" "):
tres.setPassed(True)
elif kind == "enum" and protection == "public":
cnt.public_enums_total += 1
cython_file = parse_pxd_file(pxd_class.pxd_path)
found = False
for klass in cython_file:
if hasattr(klass[0], "name") and klass[0].name == mdef.get_name():
found = True
break
# Sometimes we rename things in pyOpenMS for sanity (and namespace consistency) sake
# E.g. OpenMS::PercolatorOutfile::ScoreType becomes PercolatorOutfile_ScoreType
# and we have to go back to the full cname. However, the doxygen name needs to be inferred
if hasattr(klass[0], "cname") and klass[0].cname.endswith(mdef.get_name()):
assumed_fullname = mdef.compoundname + "::" + mdef.get_name()
if (assumed_fullname == klass[0].cname):
found = True
break
else:
print ("Something went wrong, %s is not equal to %s" % (assumed_fullname, klass[0].cname))
if not found:
tres.setPassed(False)
tres.setMessage("TODO: Found enum in C++ but not in pxd: %s %s %s" % (mdef.kind, mdef.prot, mdef.name))
cnt.public_enums_missing += 1
comp_name = mdef.parent_doxy_file.compound.get_compoundname()
internal_file_name = mdef.parent_doxy_file.getInternalFileName()
namespace = comp_name
true_cppname = '"%s::%s"' % (comp_name, mdef.get_name())
enumr = "\n"
enumr += 'cdef extern from "<%s>" namespace "%s":\n' % (internal_file_name, namespace)
enumr += "\n"
enumr += ' cdef enum %s %s:\n' % (mdef.get_name(), true_cppname)
for val in mdef.get_enumvalue():
enumr += " %s\n" % val.get_name()
tres.setMessage(tres.getMessage() + enumr)
elif len(klass[0].items) != len(mdef.get_enumvalue()):
tres.setPassed(False)
tres.setMessage("TODO: Found enum in C++ with %s members but in Cython there are %s members: " % (
len(mdef.get_enumvalue()), len(klass[0].items) ) )
else:
tres.setPassed(True)
elif kind == "variable" and protection == "public":
attrnames = [a.name for a in pxd_class.attributes]
cnt.public_variables += 1
if not mdef.name in attrnames:
tres.setPassed(False)
tres.setMessage("TODO: Found attribute in C++ but not in pxd: %s %s %s" % (mdef.kind, mdef.prot, mdef.name) )
cnt.public_variables_missing += 1
else:
tres.setPassed(True)
elif kind == "function" and protection == "public":
# Wrap of public member functions ...
cnt.public_methods += 1
c_return_type = mdef.resolve_return_type()
if mdef.name in pxd_class.methods:
# Found match between C++ method and Python method
py_methods = pxd_class.methods[mdef.name]
if not isinstance(py_methods, list):
py_methods = [py_methods]
py_return_type = [str(d.result_type) for d in py_methods]
if mdef.definition == mdef.name:
# Constructor, no return type -> all is good
if len(c_return_type) != 0:
raise AssertionError()
tres.setPassed(True)
elif "void" in py_return_type and not "void" in c_return_type:
tres.setPassed(False)
tres.setMessage( "TODO: Mismatch between C++ return type (%s) and Python return type (%s) in %s %s %s:" % (
str(c_return_type), str(py_return_type), mdef.kind, mdef.prot, mdef.name) )
else:
tres.setPassed(True)
else:
# Missing method, lets remove false positives (destructors, operators, etc)
cnt.public_methods_missing += 1
if mdef.name.find("~") != -1:
# destructor
cnt.public_methods_missing_nowrapping += 1
tres.setPassed(True)
tres.setMessage("Cannot wrap destructor")
elif mdef.definition == mdef.name:
# constructor
find_match = False
for kk in pxd_class.methods:
if kk.split("_")[-1] == mdef.name:
find_match = True
if find_match:
cnt.public_methods_missing_nowrapping += 1
tres.setPassed(True)
tres.setMessage("Renamed constructor")
else:
tres.setPassed(False)
tres.setMessage(" -- TODO missing constructor in PXD: %s except + nogil " % mdef.format_definition_for_cython())
elif (mdef.name.find("operator") != -1 or
mdef.name.find("begin") != -1 or
mdef.name.find("end") != -1):
cnt.public_methods_missing_nowrapping += 1
tres.setPassed(True)
tres.setMessage("Cannot wrap method with iterator/operator %s" % mdef.name)
else:
tres.setPassed(False)
tres.setMessage(" -- TODO missing function in PXD: %s except + nogil " % mdef.format_definition_for_cython())
else:
# It is neither public function/enum/variable
tres.setPassed(True)
# Return the testresult
return tres
#
## Class for counting occurrences
#
class Counter(object):
def __init__(self):
self.total = 0
self.skipped = 0
self.skipped_could_not_parse = 0
self.skipped_ignored = 0
self.skipped_protected = 0
self.skipped_no_location = 0
self.skipped_no_sections = 0
self.skipped_no_pxd_file = 0
self.skipped_no_pxd_match = 0
self.parsed = 0
#
self.public_enums_total = 0
self.public_enums_missing = 0
#
self.public_methods = 0
self.public_methods_missing = 0
self.public_methods_missing_nowrapping = 0
self.public_variables = 0
self.public_variables_missing = 0
def computed_skipped(self):
self.skipped = self.skipped_could_not_parse +\
self.skipped_ignored + \
self.skipped_protected + \
self.skipped_no_location + \
self.skipped_no_sections + \
self.skipped_no_pxd_file + \
self.skipped_no_pxd_match
def print_skipping_reason(self):
self.computed_skipped()
print ("Skipped files: %s" % self.skipped)
print ("- Could not parse xml: %s" % self.skipped_could_not_parse)
print ("- Could not parse location in xml: %s" % self.skipped_no_location)
print ("- Ignored per ignore-file: %s" % self.skipped_ignored)
print ("- Protected Compound: %s" % self.skipped_protected)
print ("- Could not find sections in xml: %s" % self.skipped_no_sections)
print ("- Could not find associated pxd file : %s" % self.skipped_no_pxd_file)
print ("- Could not find matching class in pxd file : %s" % self.skipped_no_pxd_match)
def print_stats(self):
self.computed_skipped()
print ("Total files: %s" % self.total)
print ("Skipped files: %s" % self.skipped)
print ("Parsed files: %s" % self.parsed)
print ("Parsed public methods %s (of which were missing %s and %s were operator/destructors) " % (self.public_methods, self.public_methods_missing, self.public_methods_missing_nowrapping))
print (" - wrapped %s " % (self.public_methods - self.public_methods_missing))
print (" - unwrapped operators/destructors %s " % (self.public_methods_missing_nowrapping))
print (" - unwrapped methods %s " % (self.public_methods_missing - self.public_methods_missing_nowrapping))
print ("Parsed public enums %s (of which were missing %s) " % (self.public_enums_total, self.public_enums_missing))
print ("Parsed public attributes %s (of which were missing %s) " % (self.public_variables, self.public_variables_missing))
print ("Note that this script counts each method name only once and only maps from \n"+ \
"C++ to Python (not the other way around), thus the numbers are slightly inaccurate.")
#
## Class for an OpenMS .h file
#
class OpenMSSourceFile(object):
"""
Class for an OpenMS .h file
Can parse out information on current maintainer stored in OpenMS-specific
format.
"""
def __init__(self, fname):
self.fname = fname
def getMaintainer(self):
try:
return self._getMaintainer()
except IOError:
return None
def _getMaintainer(self):
"""
// $Maintainer: xxx $
// $Authors: xxx $
"""
maintainer_reg = re.compile(".*\$\s*Maintainer:([^\$]*)\$")
with open(self.fname) as f:
data = f.read()
maintainer = maintainer_reg.search(data)
if maintainer is not None:
return maintainer.group(1).strip()
else:
return None
#
## Class holding Doxygen XML file and next one function declaration
#
class DoxygenXMLFile(object):
"""
The doxygen XML file
Abstracts the parsing of the Doxygen XML file and contains some reasoning
about the class (e.g. its members, is it pure abstract, etc.)
Can generate a viable PXD file from the doxygen information alone.
"""
def __init__(self, fname):
self.fname = fname
self.parsed_file = None
self.compound = None
self.parsing_error = False
self.parsing_error_message = None
def parse_doxygen(self):
try:
self.parsed_file = doxygen_parse(self.fname)
self.compound = self.parsed_file.get_compounddef()
return self.parsed_file
except Exception as e:
print ("Error parsing doxygen xml file", e.message)
self.parsing_error_message = e.message
self.parsing_error = True
return None
def getInternalFileName(self):
location = self.parsed_file.get_compounddef().get_location()
if location is None:
return None
return location.get_file()
def getCompoundFileLocation(self, source_dir):
location = self.parsed_file.get_compounddef().get_location()
if location is None:
return None
return os.path.realpath( os.path.join(source_dir, "src", "openms", "include", location.get_file()) )
def isEmpty(self, discard_defines=False):
compound = self.compound
if not discard_defines:
return len(compound.get_sectiondef()) == 0
# check whether there is anything other than defines and typedefs
empty = True
for mdef in self.iterMemberDef():
if not mdef.get_kind() in ["define", "typedef", "slot", "signal"]:
# DoxMemberKind: define property event variable typedef enum function signal prototype friend dcop slot
empty = False
if empty and not len(compound.get_sectiondef()) == 0:
# contains only typedefs etc
pass
return empty
def get_pxd_from_class(self, dfile, internal_file_name, xml_output_path):
"""
Generate a viable PXD file
"""
compound = dfile.compound
comp_name = compound.get_compoundname()
#
# Step 1: generate cimport includes
#
includes = ""
if len(compound.get_includes()) == 1:
try:
reffile = os.path.join(xml_output_path, compound.get_includes()[0].get_refid() + ".xml")
# read includes from ref file
dreffile = DoxygenXMLFile(reffile).parse_doxygen()
include_compound = dreffile.get_compounddef()
except Exception as e:
print ("Error: Could not read includes from file for compound %s with error %s" % (comp_name, e.message))
include_compound = compound
else:
include_compound = compound
for inc in include_compound.get_includes():
val = inc.getValueOf_()
if val.startswith("OpenMS"):
header_file = val.split("/")[-1]
if header_file.endswith(".h"):
header_file = header_file[:-2]
# We do not import certain headers since we did not wrap them in Python
if header_file in ["Exception", "Macros", "config", "StandardTypes"]:
continue
includes += "from %s cimport *\n" % header_file
#
# Step 2: class definition
#
parent_classes = [n.getValueOf_() for n in compound.basecompoundref]
namespace = "::".join(comp_name.split("::")[:-1])
preferred_classname = "_".join(comp_name.split("::")[1:])
preferred_classname = comp_name.split("::")[-1]
cldef = "\n"
cldef += 'cdef extern from "<%s>" namespace "%s":\n' % (internal_file_name, namespace)
cldef += "\n"
inherit_txt = ""
true_cppname = '"%s"' % comp_name
if len(parent_classes) > 0:
inherit_txt = "(%s)" % ",".join(parent_classes)
true_cppname = '' # Cython does not accept this if mixed with inheritance
if compound.templateparamlist is None:
cldef += ' cdef cppclass %s%s %s:\n' % (preferred_classname, inherit_txt, true_cppname)
else:
targs = [p.get_declname() for p in compound.templateparamlist.get_param()]
cldef += ' cdef cppclass %s[%s]%s:\n' % (preferred_classname, ",".join(targs), inherit_txt)
cldef += ' #\n'
cldef += ' # wrap-doc:\n'
cldef += ' # ADD PYTHON DOCUMENTATION HERE\n'
cldef += ' #\n'
if len(parent_classes) > 0:
cldef += ' # wrap-inherits:\n'
for p in parent_classes:
cldef += ' # %s\n' % (p)
# check if it is abstract, then do not attempt to wrap it
if self.isAbstract():
cldef += ' # wrap-ignore\n'
cldef += ' # ABSTRACT class\n'
#
# Step 3: methods and enums
#
methods = ""
default_ctor = False
copy_ctor = False
enum = ""
static_methods = ""
imports_needed = {}
for mdef in dfile.iterMemberDef():
if mdef.kind == "enum" and mdef.prot == "public":
# add enums
enum += '\n'
enum += 'cdef extern from "<%s>" namespace "%s":\n' % (internal_file_name, comp_name)
enum += ' cdef enum %s "%s":\n' % (mdef.name, comp_name + "::" + mdef.name)
enum += ' #wrap-attach:\n'
enum += ' # %s\n' % preferred_classname
for val in mdef.enumvalue:
enum += ' %s\n' % val.get_name()
if mdef.kind == "variable" and mdef.prot == "public":
# print ("var", mdef.name)
# cannot wrap const member variables
if mdef.definition.find("const") == -1:
methods += " %s\n" % mdef.format_definition_for_cython(False)
else:
methods += " # const # %s\n" % mdef.format_definition_for_cython(False)
elif mdef.kind == "function" and mdef.prot == "public":
if mdef.definition == mdef.name:
# Means we have a constructor
if mdef.get_argsstring().strip() == "()":
# Default constructor
default_ctor = True
continue
elif mdef.get_argsstring().strip().find(mdef.name) != -1 and \
mdef.get_argsstring().strip().find(",") == -1:
# Copy constructor
copy_ctor = True
continue
if mdef.name.find("~") != -1:
# Destructor
continue
# res += "do member function/attribute : ", mdef.kind, mdef.prot, mdef.name
declaration = mdef.format_definition_for_cython(False)
DoxygenCppFunction.compute_imports(declaration, imports_needed)
if declaration.find("operator=(") != -1:
# assignment operator, cannot be overridden in Python
continue
if mdef.definition.find("static") != -1:
methods += " # TODO: static # %s except + nogil \n" % declaration
static_methods += " %s except + nogil # wrap-attach:%s\n" % (declaration, preferred_classname)
continue
methods += " %s except + nogil \n" % declaration
# Build up the whole file
res = DoxygenCppFunction.generate_imports(imports_needed) # add default cimport
res += includes
res += cldef
# We need to create a default ctor in any case, however we do not need
# to *wrap* the copy constructor even though we need to have one for Cython
if True: # not default_ctor:
res += " %s() except + nogil \n" % comp_name.split("::")[-1]
if not copy_ctor:
res += " %s(%s) except + nogil #wrap-ignore\n" % (comp_name.split("::")[-1], comp_name.split("::")[-1])
else:
res += " %s(%s) except + nogil \n" % (comp_name.split("::")[-1], comp_name.split("::")[-1])
res += methods
res += enum
res += "\n"
if len(static_methods) > 0:
res += "\n"
res += "# COMMENT: wrap static methods\n"
res += 'cdef extern from "<%s>" namespace "%s::%s":\n' % (internal_file_name, namespace, preferred_classname)
res += "\n"
res += static_methods
res += "\n"
return res
def iterMemberDef(self):
"""Iterate over all members of this class.
We do not care about the sections defined in the documentation here.
"""
for sdef in self.compound.get_sectiondef():
for mdef_ in sdef.get_memberdef():
mdef = DoxygenCppFunction.generate_from_obj(mdef_)
mdef.parent_doxy_file = self
mdef.compoundname = self.compound.compoundname
yield mdef
def isAbstract(self):
for mdef in self.iterMemberDef():
if mdef.get_argsstring().endswith("=0"):
return True
return False
class DoxygenCppFunction(object):
""" A Cpp function definition from a doxygen file"""
def __init__(self):
# Real initialization lives in initialize_dgencpp() so that it can also be called on monkey-patched objects (see generate_from_obj)
self.initialize_dgencpp()
@staticmethod
def generate_from_obj(mdef):
"""Attaches the functionality of this object to the given input object"""
for k,v in DoxygenCppFunction.__dict__.items():
if callable(v) and not k == "__init__":
import types
mdef.__dict__[k] = types.MethodType(v, mdef)
mdef.initialize_dgencpp()
return mdef
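# Note: generate_from_obj() is plain monkey-patching -- every callable
# defined on DoxygenCppFunction (except __init__) is bound as a method of
# the foreign doxygen member object, so a later call such as
# mdef.format_definition_for_cython() works on objects that were never
# instances of this class.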
@staticmethod
def generate_imports(imports):
"""
Generate default imports
"""
res = ""
res += "from Types cimport *\n"
for k in sorted(imports.keys()):
if k == "bool":
res += "from libcpp cimport bool\n"
else:
res += "from libcpp.%s cimport %s as libcpp_%s\n" % (k,k,k)
return res
@staticmethod
def compute_imports(declaration, imports):
if declaration.find("libcpp_vector") != -1:
imports["vector"] = 0
if declaration.find("libcpp_pair") != -1:
imports["pair"] = 0
if declaration.find("libcpp_map") != -1:
imports["map"] = 0
if declaration.find("libcpp_set") != -1:
imports["set"] = 0
if declaration.find("libcpp_string") != -1:
imports["string"] = 0
if declaration.find("bool") != -1:
imports["bool"] = 0
def initialize_dgencpp(self):
pass
def resolve_return_type(self):
return self._resolve_type(self.get_type().content_)
def _resolve_type(self, mtype):
res = []
for c in mtype:
val = c.getValue()
if hasattr(val, "getValueOf_"):
res.append(val.getValueOf_())
else:
res.append(val)
return res
def format_definition_for_cython(self, replace_nogil=True):
"""Parse a doxygen function definition and write it in Cython"""
c_return_type = self.resolve_return_type()
# Build the argument list from the parsed parameters; default arguments
# are dropped below since Cython cannot handle them.
function_name = self.name
arguments = "("
nested = False
for i,p in enumerate(self.get_param()):
ptype = self._resolve_type(p.get_type().content_)
dname = p.declname
# ignore default arguments etc ... Cython cannot use them
# p.defval.content_
# replace python keywords in argument name: except, type, lambda, map ...
import keyword
if keyword.iskeyword(dname):
dname = dname + "_"
if dname in dir(__builtins__):
dname = dname + "_"
# dname = dname.replace("except", "except_").replace("type", "type_").replace("lambda", "lambda_").replace("map", "map_")
tojoin = "".join(ptype) + " " + dname.strip()
if tojoin.count("std::") > 2:
nested = True
if i == 0:
arguments += tojoin
else:
arguments += ", " + tojoin
arguments += ")"
# arguments = (std::vector< Int > column_indices, std::vector< DoubleReal > column_values, const String & name)
if len(self.get_argsstring()) == 0:
arguments = ""
# remove returned references and const values (Cython cannot deal with those at the moment)
return_type = "".join(c_return_type)
return_type = return_type.replace("&", "")
return_type = return_type.replace("const", "")
return_type = return_type.strip()
cpp_def = return_type + " " + function_name + arguments
# Handle comments
cpp_def = cpp_def.replace("///", "#")
cpp_def = cpp_def.replace("//", "#")
# Add nogil
if replace_nogil:
cpp_def = cpp_def.replace(";", "except + nogil ")
cpp_def = cpp_def.replace("const;", "except + nogil ")
else:
cpp_def = cpp_def.replace("const;", "")
cpp_def = cpp_def.replace(";", "")
# Replace common names from OpenMS, templates, STL constructs etc
# TODO handle static ...
cpp_def = cpp_def.replace("static", "")
cpp_def = cpp_def.replace("MSSpectrum<>", "MSSpectrum")
cpp_def = cpp_def.replace("MSChromatogram<>", "MSChromatogram")
cpp_def = cpp_def.replace("std::vector", "libcpp_vector")
cpp_def = cpp_def.replace("std::map", "libcpp_map")
cpp_def = cpp_def.replace("std::pair", "libcpp_pair")
cpp_def = cpp_def.replace("std::set", "libcpp_set")
cpp_def = cpp_def.replace("std::string", "libcpp_string")
cpp_def = cpp_def.replace("<", "[")
cpp_def = cpp_def.replace(">", "]")
cpp_def = cpp_def.replace("operator[]", "operator__[]")
cpp_def = cpp_def.replace("operator]", "operator>")
cpp_def = cpp_def.replace("operator[", "operator<")
cpp_def = cpp_def.replace("operator__[]", "operator[]")
# Note that template arguments cannot be typedefs but need to be basic types
cpp_def = cpp_def.replace("[ DoubleReal ]", "[ double ]")
cpp_def = cpp_def.replace("[ Size ]", "[ size_t ]")
cpp_def = cpp_def.replace("[Size,Size]", "[size_t,size_t]")
cpp_def = cpp_def.replace("[ Int ]", "[ int ]")
cpp_def = cpp_def.replace("FeatureMap[]", "FeatureMap[Feature]")
cpp_def = cpp_def.replace("MSSpectrum[]", "MSSpectrum")
cpp_def = cpp_def.replace("MSExperiment[]", "MSExperiment")
cpp_def = cpp_def.replace("PeakSpectrum", "MSSpectrum")
cpp_def = cpp_def.replace("PeakMap", "MSExperiment")
# Handle const
cpp_def = cpp_def.replace("const String", "constXXXString")
cpp_def = cpp_def.replace("const ", "")
cpp_def = cpp_def.replace("constXXXString", "const String")
# Alert the user to potential problems and comment out potential
# dangerous things (raw pointers, iterators)
if cpp_def.find("*") != -1 or \
cpp_def.find("::iterator") != -1:
cpp_def = "# POINTER # " + cpp_def
if cpp_def.find("::") != -1:
cpp_def = "# NAMESPACE # " + cpp_def
if self.templateparamlist is not None:
cpp_def = "# TEMPLATE # " + cpp_def
if nested:
cpp_def = "# NESTED STL # " + cpp_def
return cpp_def.strip()
#
## Class for the ignore file
#
class IgnoreFile(object):
"""
Describes the ignore file (e.g. which classes we should skip)
"""
def __init__(self):
self.data = {
"IgnoreNames" : [],
"IgnoreMethods" : {},
}
def load(self, fname):
self.fname = fname
with open(self.fname) as f:
self.data = yaml.safe_load(f)["PyOpenMSChecker"]
def isNameIgnored(self, name):
return name in self.data["IgnoreNames"]
def getIgnoredMethods(self, name):
res = self.data["IgnoreMethods"].get(name, [])
if res is None:
return []
return res
class PXDFileParseError(Exception):
def __init__(self, message):
self.message = message
#
## Class for the .pxd file
#
class PXDFile(object):
"""
Class for the .pxd file
"""
def __init__(self):
pass
@staticmethod
def parse_multiple_files(pxdfiles, comp_name):
def cimport(b, _, __):
print ("cimport", b.module_name, "as", b.as_name)
handlers = { CEnumDefNode : EnumDecl.parseTree,
CppClassNode : CppClassDecl.parseTree,
CTypeDefNode : CTypeDefDecl.parseTree,
CVarDefNode : MethodOrAttributeDecl.parseTree,
CImportStatNode : cimport,
}
found = False
# Go through all files and all classes in those files, trying to find
# the class whose C/C++ name matches the current compound name
for pxdfile in pxdfiles:
cython_file = parse_pxd_file(pxdfile)
for klass in cython_file:
if hasattr(klass[0], "cname"):
if klass[0].cname == comp_name:
found = True
if found: break
if found: break
if not found:
error_str = "Could not find a match for class %s in file %s" % (comp_name, pxdfile)
raise PXDFileParseError(error_str)
# Check if we really have a class, then initialize it
if isinstance(klass[0], CppClassNode):
cl = CppClassDecl.parseTree(klass[0], klass[1], klass[2])
else:
print ("Something is wrong, not a class")
raise PXDFileParseError("wrong")
cl.pxdfile = pxdfile
for klass in cython_file:
handler = handlers.get(type(klass[0]))
if handler is None:
continue
res = handler(klass[0], klass[1], klass[2])
if res is None:
continue
if "wrap-attach" in res.annotations:
if res.annotations["wrap-attach"] == cl.name:
## attach this to the above class
cl.methods[res.name] = res
return cl
class TestResult:
""" A Result from a single test which either passed or failed.
In addition, it contains information about the reason why the test failed,
who is the maintainer and a unique testname.
"""
def __init__(self, passed = None, message=None, log_level=None, name=None):
self.message = message
self.passed = passed
self.name = name
self.log_level = log_level
self.maintainer = None
if log_level is None:
self.log_level = 0
def setMessage(self, message_):
self.message = message_
def getMessage(self):
return self.message
def getXMLName(self):
xmlname = self.name
xmlname = xmlname.replace("::", "_")
xmlname = re.sub('[^0-9A-Za-z_]', '', xmlname)
xmlname = xml_escape(xmlname) # still escape, just to be sure
return xmlname
def setPassed(self, passed):
self.passed = passed
def isPassed(self):
return self.passed
def setMaintainer(self, maintainer):
self.maintainer = maintainer
def getMaintainer(self):
if self.maintainer is None:
return "Nobody"
return self.maintainer
class TestResultHandler:
""" A Container for all test results.
"""
def __init__(self):
self._list = []
def append(self, l):
self._list.append(l)
def __iter__(self):
for l in self._list:
yield l
def to_cdash_xml(self, template_path, output_path):
if template_path.endswith("Test.xml"):
body_start = "<Testing>"
elif template_path.endswith("Build.xml"):
body_start = "<Build>"
else:
raise Exception("Unsupported template name %s" % template_path)
xml_output = []
# load template head (everything up to "<Testing>")
# -> this assumes a specific format of the xml
with open(template_path) as f:
for line in f:
if line.strip() == body_start:
break
xml_output.append(line)
"""
# load template head
$template = file($ctestReportingPath."/Test.xml");
$newTestFile = array();
foreach ($template as $line)
{
array_push($newTestFile, $line);
if (trim($line) == "<Testing>")
{
break;
}
}
"""
# Start writing the xml
xml_output.append(" <Testing>\n")
xml_output.append(" <StartDateTime>%s</StartDateTime>\n" % (time.strftime('%b %d %H:%M') ) )
xml_output.append(" <StartTestTime>%s</StartTestTime>\n" % (time.time()) )
xml_output.append(" <TestList>\n")
for classtestresults in self:
for tres in classtestresults:
xml_output.append(" <Test>%s</Test>\n" % xml_escape(tres.getXMLName() ) )
xml_output.append(" </TestList>\n")
for classtestresults in self:
for tres in classtestresults:
status = ""
if tres.isPassed():
status = "passed"
else:
status = "failed"
xml_output.append(" " * 2 + '<Test Status="%s">\n' % status)
xml_output.append(" " * 4 + '<Name>%s</Name>\n' % xml_escape(tres.getXMLName() ) )
xml_output.append(" " * 4 + '<Path> ./tools/ </Path>\n' )
xml_output.append(" " * 4 + '<FullName>%s</FullName>\n' % xml_escape(tres.name) )
xml_output.append(" " * 4 + '<FullCommandLine>python PythonExtensionChecker.py %s</FullCommandLine>\n' % xml_escape(tres.name) )
xml_output.append(" " * 4 + '<Results>')
xml_output.append("""
<NamedMeasurement type="numeric/double" name="Execution Time"><Value>0.001</Value></NamedMeasurement>
<NamedMeasurement type="text/string" name="Completion Status"><Value>Completed</Value></NamedMeasurement>
<NamedMeasurement type="text/string" name="Maintainer"><Value>%s</Value></NamedMeasurement>
<NamedMeasurement type="text/string" name="Command Line"><Value>python PythonExtensionChecker.py</Value></NamedMeasurement>\n""" % (
xml_escape(tres.getMaintainer() )
) )
xml_output.append(" " * 6 + '<Measurement>\n')
xml_output.append(" " * 8 + '<Value>\n')
if tres.getMessage() is not None:
xml_output.append(" " * 10 + xml_escape(tres.getMessage() ) + "\n")
xml_output.append(" " * 8 + '</Value>\n')
xml_output.append(" " * 6 + '</Measurement>\n')
xml_output.append(" " * 4 + '</Results>\n')
xml_output.append(" " * 2 + '</Test>\n')
xml_output.append("<EndDateTime>%s</EndDateTime>\n" % (time.strftime('%b %d %H:%M') ) )
xml_output.append("<EndTestTime>%s</EndTestTime>\n" % (time.time()) )
xml_output.append("<ElapsedMinutes></ElapsedMinutes>\n")
xml_output.append("</Testing>\n")
xml_output.append("</Site>\n")
with open(output_path, "w") as f:
for line in xml_output:
f.write(line)
"""
/*
<?xml version="1.0" encoding="UTF-8"?>
<Site BuildName="Darwin-clang++"
BuildStamp="20121021-2300-Nightly"
Name="laphroaig.imp.fu-berlin.de"
Generator="ctest-2.8.9"
CompilerName="/usr/bin/clang++"
OSName="Mac OS X"
Hostname="laphroaig.imp.fu-berlin.de"
OSRelease="10.7.5"
OSVersion="11G63"
OSPlatform="x86_64"
Is64Bits="1"
VendorString="GenuineIntel"
VendorID="Intel Corporation"
FamilyID="6"
ModelID="37"
ProcessorCacheSize="32768"
NumberOfLogicalCPU="4"
NumberOfPhysicalCPU="2"
TotalVirtualMemory="512"
TotalPhysicalMemory="8192"
LogicalProcessorsPerPhysical="8"
ProcessorClockFrequency="2660"
>
<Testing>
<StartDateTime>Oct 22 18:36 CEST</StartDateTime>
<StartTestTime>1350923805</StartTestTime>
<TestList>
<Test>BinaryComposeFunctionAdapter_test</Test>
</TestList>
<Test Status="passed">
<Name>BinaryComposeFunctionAdapter_test</Name>
<Path>./source/TEST</Path>
<FullName>./source/TEST/BinaryComposeFunctionAdapter_test</FullName>
<FullCommandLine>/Users/aiche/dev/openms/openms-src/build/ninja/source/TEST/bin/BinaryComposeFunctionAdapter_test</FullCommandLine>
<Results>
<NamedMeasurement type="numeric/double" name="Execution Time"><Value>0.469694</Value></NamedMeasurement>
<NamedMeasurement type="text/string" name="Completion Status"><Value>Completed</Value></NamedMeasurement>
<NamedMeasurement type="text/string" name="Command Line"><Value>/Users/aiche/dev/openms/openms-src/build/ninja/source/TEST/bin/BinaryComposeFunctionAdapter_test</Value></NamedMeasurement>
<Measurement>
<Value>
freier Text
</Value>
</Measurement>
</Results>
</Test>
<EndDateTime>Oct 22 18:43 CEST</EndDateTime>
<EndTestTime>1350924239</EndTestTime>
<ElapsedMinutes>7.2</ElapsedMinutes></Testing>
</Site>
*/
"""
def writeOutput(testresults, output_format, cnt, bin_path):
###################################
# Output
###################################
if output_format in ["text", "text-verbose", "text-quiet"]:
for classtestresults in testresults:
if len(classtestresults) > 1:
t = classtestresults[0]
lenfailed = len([tr for tr in classtestresults if not tr.isPassed()])
if lenfailed > 0:
print ("== Test results for element %s - from Cpp file %s with maintainer %s and corresponding pxd file %s" % (
t.comp_name, t.file_location, t.maintainer, t.pxdfile))
for tres in classtestresults:
if not tres.isPassed():
print (tres.message)
elif tres.log_level >= 10 and output_format in ["text", "text-verbose"]:
print (tres.message)
elif tres.log_level >= 0 and output_format in ["text-verbose"]:
print (tres.name, "::", tres.message)
elif output_format == "xml":
# check if all files required to report in CDash are present
tag_file = os.path.join(bin_path, "Testing", "TAG" )
try:
# read the first line of tagfile (TAG) -> if it does not exist,
# an IOError is thrown
with open(tag_file) as f:
ctestReportingPath = f.readline().strip()
ctestReportingPath = os.path.join(bin_path, "Testing", ctestReportingPath)
if not os.path.exists( ctestReportingPath ):
raise Exception("Missing directory at %s" % ( ctestReportingPath ) )
except IOError:
raise Exception("Missing nightly test information at %s" % (tag_file) )
template_path = os.path.join(ctestReportingPath, "Test.xml" )
output_path = template_path # output is always Test.xml
if not os.path.isfile(template_path):
template_path = os.path.join(ctestReportingPath, "Build.xml" ) #Build.xml an be used as alternative template
testresults.to_cdash_xml(template_path, output_path)
else:
raise Exception("Unknown output format %s" % output_format)
cnt.print_stats()
cnt.print_skipping_reason()
def checkPythonPxdHeader(src_path, bin_path, ignorefilename, pxds_out, print_pxd, output_format, generate_pxd, verbose):
""" Checks a set of doxygen xml file against a set of pxd header files
For each C++ class found in the doxygen XML files, it tries to identify the
corresponding pxd file. If a pxd file exists, it checks whether
i) all public functions, enums and attributes are wrapped in Python
ii) all void return types are correct in Python (these are not checked at
compile time)
iii) all fields of an enum are accessible from Python
If it finds a missing method, the script suggests an addition; if a whole
class is missing, the script writes a suggested .pxd file to a specified
location (pxds_out).
The output format can either be text (human readable) or xml; the latter
overwrites the CDash Test.xml file to provide output to CDash. Please only
request xml output if you have executed "ctest -D Nightly" (or similar) in
your build directory.
TODO also look at ./doc/doxygen/doxygen-error.log ?
Make sure to build the doxygen xmls first with
$ make doc_xml
"""
xml_output_path = os.path.join(bin_path, "doc", "xml_output")
xml_files = glob.glob(xml_output_path + "/*.xml")
print ("Found %s doxygen xml files" % (len(xml_files)))
if len(xml_files) == 0:
raise Exception("No doxygen files found in directory:\n%s,\n" % xml_output_path + \
"Please make sure you build the doxygen xmls (make doc_xml)\n" +\
"and that you specified the correct directory." )
print ("Creating pxd file map")
pxd_file_matching = create_pxd_file_map(src_path)
print ("Found %s matching pxd files" % len(pxd_file_matching))
cnt = Counter()
cnt.total = len(xml_files)
ignorefile = IgnoreFile()
if len(ignorefilename) > 0:
ignorefile.load(ignorefilename)
if len(generate_pxd) > 0:
print ("Will only consider class", generate_pxd)
def pxd_text_printout(pxd_text, pxds_out, comp_name, print_pxd):
if print_pxd:
print ("")
print (pxd_text)
if len(pxds_out) > 0 and pxd_text is not None:
fname = os.path.join(pxds_out, "%s.pxd" % comp_name.split("::")[-1] )
with open(fname, "w" ) as f:
f.write(pxd_text)
testresults = TestResultHandler()
# Iterate through all xml files generated by doxygen (these are all the
# classes available in OpenMS)
for f in xml_files:
# Only look out for one specific pxd file (see option --generate_pxd_for)
if len(generate_pxd) > 0:
if f.find(generate_pxd) == -1:
continue
if verbose:
print ("Working on file", f)
# Try to parse the doxygen file
dfile = DoxygenXMLFile(f)
res = dfile.parse_doxygen()
if dfile.parsing_error:
# e.g. <computeroutput><bold></computeroutput>
cnt.skipped_could_not_parse += 1
msg = "Skip:: No-parse :: could not parse file %s with error %s" % (f, dfile.parsing_error_message)
tres = TestResult(False, msg, name="%s_test" % f )
testresults.append([ tres ])
if verbose: print (" - Skip file due to parsing error")
continue
elif os.path.basename(f) == "index.xml":
# Skip the index file
continue
# Parse class and namespace
# Skip certain namespaces or those without any namespace (we are only
# interested in the OpenMS and OpenSwath namespace).
compound = res.get_compounddef()
comp_name = compound.get_compoundname()
if len(comp_name.split("::") ) == 1:
# We are only interested in the classes themselves (in OpenMS
# namespace), we thus skip all TOPP tools, header and cpp
# descriptors which are not inside a namespace:
if verbose: print (" - Skip file without namespace:", comp_name)
continue
if verbose:
print (" - Found class", comp_name, compound.prot, "in namespace", comp_name.split("::")[0])
namespace = comp_name.split("::")[0]
if namespace in ["std", "Ui", "xercesc"]:
# Namespace std, xerces, UI -> skip
continue
elif comp_name.startswith("ms::numpress"):
# MS Numpress namespace
continue
elif comp_name.startswith("KDTree::"):
# KD Tree namespace
continue
elif not (comp_name.startswith("OpenMS") or comp_name.startswith("OpenSwath") or comp_name.startswith("OpenNuXL") ):
# Continue without checking or generating a testreport
print ("Unknown namespace", comp_name)
continue
# Skip files which are listed in the "ignore" file
if ignorefile.isNameIgnored(comp_name):
msg = "Skip:: Ignored :: Class %s (file %s)" % (comp_name, f)
tres = TestResult(True, msg, log_level=10, name="%s_test" % comp_name)
testresults.append([ tres ])
cnt.skipped_ignored += 1
continue
# Ignore private/protected classes
if compound.prot != "public":
msg = "Skip:: Protected :: Compound %s is not public, skip" % (comp_name)
tres = TestResult(True, msg, log_level=10, name="%s_test" % comp_name)
testresults.append([ tres ])
cnt.skipped_protected += 1
continue
# Get file location and skip empty files
file_location = dfile.getCompoundFileLocation(src_path)
internal_file_name = dfile.getInternalFileName()
if verbose:
print (" - Header file location identified as", internal_file_name)
if file_location is None:
msg = "Skip:: No-data :: there is no source file for %s" % f
tres = TestResult(True, msg, log_level=10, name="%s_test" % comp_name)
testresults.append([ tres ])
cnt.skipped_no_location += 1
continue
# Skip empty classes
openms_file = OpenMSSourceFile(file_location)
maintainer = openms_file.getMaintainer()
if dfile.isEmpty(True):
msg = "Skip:: No-data :: File is empty (no section definitions found or only definitions found) in file %s" % f
tres = TestResult(True, msg, log_level=10, name="%s_test" % comp_name)
tres.maintainer = maintainer
testresults.append([ tres ])
cnt.skipped_no_sections += 1
continue
# Retrieve all associated pxd files with this specific header file
if internal_file_name in pxd_file_matching:
pxdfiles = pxd_file_matching[internal_file_name]
else:
msg = "Skip:: No-pxd :: No pxd file exists for Class %s (File %s) %s" % (comp_name, file_location, f)
tres = TestResult(False, msg, name="Missing_%s_test" % comp_name )
tres.maintainer = maintainer
testresults.append([ tres ])
cnt.skipped_no_pxd_file += 1
pxd_text = dfile.get_pxd_from_class(dfile, internal_file_name, xml_output_path)
pxd_text_printout(pxd_text, pxds_out, comp_name, print_pxd)
continue
if verbose:
print (" - Matching pxd files", pxdfiles)
# At this point we have
# - the cpp class as parsed by Doxygen
# - the corresponding OpenMS header file
# - the matching pxd file(s) with the Python wrappers
# Parse the pxd files corresponding to this doxygen XML file
try:
# Raise a (dummy) exception to actually produce a PXD file for a
# specific class if requested by the user.
if len(generate_pxd) > 0:
raise PXDFileParseError ("dummy")
pxd_class = PXDFile.parse_multiple_files(pxdfiles, comp_name)
pxdfile = pxd_class.pxdfile
except PXDFileParseError as e:
# TODO specific exception
msg = "Skip:: No-pxd :: " + e.message + " for %s (in pxd file %s)" % (comp_name, pxdfiles)
tres = TestResult(False, msg, name="Missing_%s_test" % comp_name )
tres.maintainer = maintainer
testresults.append([ tres ])
cnt.skipped_no_pxd_match += 1
pxd_text = dfile.get_pxd_from_class(dfile, internal_file_name, xml_output_path)
pxd_text_printout(pxd_text, pxds_out, comp_name, print_pxd)
continue
# Count the current file
cnt.parsed += 1
# Loop through all methods which are listed in the doxygen XML file and match them to the pxd file
classtestresults = []
for method_cntr, mdef in enumerate(dfile.iterMemberDef()):
if mdef.get_name() in ignorefile.getIgnoredMethods(comp_name):
msg = "Ignore member function/attribute : %s %s %s " % (mdef.kind, mdef.prot, mdef.name)
tres = TestResult(True, msg, log_level=10)
else:
tres = handle_member_definition(mdef, pxd_class, cnt)
testname = "%s_%s::%s" % (comp_name, method_cntr, mdef.name)
testname = testname.replace("::", "_")
testname = re.sub('[^a-zA-Z0-9_]+', '', testname)
tres.comp_name = comp_name
tres.file_location = file_location
tres.pxdfile = pxdfile
tres.maintainer = maintainer
tres.name = testname
classtestresults.append(tres)
testresults.append(classtestresults)
writeOutput(testresults, output_format, cnt, bin_path)
def main(options):
checkPythonPxdHeader(options.src_path, options.bin_path,
options.ignorefile, options.pxds_out,
options.print_pxd, options.output_format,
options.generate_pxd, options.verbose)
def handle_args():
usage = "Python extension checker. Run to identify classes and functions that have not been wrapped yet in pyOpenMS. Make sure you run 'make doc_xml' in the build path (--bin_path) first."
parser = argparse.ArgumentParser(description = usage )
parser.add_argument("--bin_path", dest="bin_path", default=".", help="OpenMS build path")
parser.add_argument("--src_path", dest="src_path", default=".", help="OpenMS source path")
parser.add_argument("--ignore-file", dest="ignorefile", default="", help="Checker ignore file")
parser.add_argument("--pxds-out", dest="pxds_out", default="", help="Folder to write pxd files")
parser.add_argument("--generate_pxd_for", dest="generate_pxd", default="", help="Generate pxd file onyl for this class, then exit")
parser.add_argument("--output", dest="output_format", default="text", help="Output format (valid are 'xml', 'text', 'text-quiet', 'text-verbose' for text or ctest XML format)")
parser.add_argument('--print_pxd', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False, help="Be verbose")
# print ("Usage: checker.php <OpenMS src path> <OpenMS build path> [-u \"user name\"] [-t test] [options]\n";)
args = parser.parse_args(sys.argv[1:])
return args
if __name__=="__main__":
options = handle_args()
main(options)
"""
offending doxygen lines that fail to parse:
include/source/APPLICATIONS/TOPP/IDRipper.cpp
<B>NOTE: The meta value file origin is removed by the @p IDSplitter!!</B>
generates
the <computeroutput>IDSplitter!!</bold></computeroutput>
doc/OpenMS_tutorial/OpenMS_Tutorial.doxygen
\arg \c <b>[1]</b>:<A HREF="http://bieson.ub.uni-bielefeld.de/frontdoor.php?source_opus=1370">
generates
<listitem><para><computeroutput><bold></computeroutput>[1]</bold>:
"""
| Python |
3D | OpenMS/OpenMS | tools/rest_server_update_helper.sh | .sh | 1,442 | 44 | #!/bin/bash
# Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
# SPDX-License-Identifier: BSD-3-Clause
#
# --------------------------------------------------------------------------
# $Maintainer: Timo Sachsenberg $
# $Authors: Timo Sachsenberg $
# --------------------------------------------------------------------------
############################################################################
# Scans the binary folder and outputs tool name and version string used in
# the REST update server. Pipe the output into a versions.txt file and
# replace the corresponding file on the REST server.
############################################################################
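# A minimal usage sketch (the binary directory is a placeholder):
#
#   ./rest_server_update_helper.sh /path/to/OpenMS-build/bin > versions.txt
#
# Each emitted line is "<tool name><TAB><version string>".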
# check command line arguments
if [[ $# != 1 ]]
then
echo "Usage: $0 <new-release-bin-dir>"
exit 1
fi
# directories containing TOPP binaries
BIN_DIR=$1
# tmp files
SYSTEM_TMP_DIR=/tmp
TMP_DIR=${SYSTEM_TMP_DIR}/OpenMS_REST_update
mkdir -p ${TMP_DIR}
TMP_FILE_NEW=${TMP_DIR}/tool_list.txt
# store relevant tool names in tmp files
ls -la ${BIN_DIR}/ \
| awk '{print $9}' \
| sort \
| grep -v -e "Tutorial\|TOPPAS\|TOPPView\|INIFileEditor\|SEARCHENGINES\|OpenMSInfo\|GenericWrapper\|Testing\|SwathWizard" \
| grep -v -e "\.$" \
| grep -v -e "^$" \
| while read i
do
echo -e $i'\t'$(${BIN_DIR}/${i} --help 2>&1 | grep "Version" | sed -E "s/Version: //" | sed -E 's/\s.*$/ /')
done
| Shell |
3D | OpenMS/OpenMS | tools/quickbuild.sh | .sh | 790 | 38 | #!/bin/sh
set -e
# first argument used for number of jobs
if [ ! -z "$1" ]
then
numberOfJobs=$1
else
numberOfJobs=1
fi
###################################
# 1. download and build contrib (if not present already)
git submodule update --init contrib
# set contrib absolute path for configure
CONTRIB_PATH=`pwd`/contrib-build
if [ ! -d "contrib-build" ] ; then
mkdir -p contrib-build
cd contrib-build
cmake -DBUILD_TYPE=ALL -DNUMBER_OF_JOBS=$numberOfJobs ../contrib
cd ..
fi
###################################
# 2. build OpenMS
# - builds everything by default; the first script argument sets the number of parallel jobs
mkdir -p openms-build
cd openms-build
# contrib path needs to be an absolute path!
cmake -DOPENMS_CONTRIB_LIBS="$CONTRIB_PATH" -DBOOST_USE_STATIC=On ../
make -j $numberOfJobs
cd ..
| Shell |
3D | OpenMS/OpenMS | tools/quickbuild-osx.sh | .sh | 876 | 38 | #!/bin/sh
set -e
# first argument used for number of jobs
if [ ! -z "$1" ]
then
numberOfJobs=$1
else
numberOfJobs=1
fi
###################################
# 1. download and build contrib (if not present already)
git submodule update --init contrib
# set contrib absolute path for configure
CONTRIB_PATH=`pwd`/contrib-build
if [ ! -d "contrib-build" ] ; then
mkdir -p contrib-build
cd contrib-build
cmake -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DNUMBER_OF_JOBS=$numberOfJobs -DBUILD_TYPE=ALL ../contrib
cd ..
fi
###################################
# 2. build OpenMS
# - builds everything by default; the first script argument sets the number of parallel jobs
mkdir -p openms-build
cd openms-build
# contrib path needs to be an absolute path!
cmake -DOPENMS_CONTRIB_LIBS="$CONTRIB_PATH" -DNUMBER_OF_JOBS=$numberOfJobs -DBOOST_USE_STATIC=On ../
make -j $numberOfJobs $@
cd ..
| Shell |
3D | OpenMS/OpenMS | tools/update_ini_files_OpenMS_version.sh | .sh | 330 | 4 | #DEPRECATED. Functionality has been merged into update_version_numbers.yml
TOOL_DIR_PATH="./src/tests/topp/"
find $TOOL_DIR_PATH -type f -iname '*.ini' -exec grep -q '<ITEM name="version".*Version of the tool' {} \; -exec sed -i '' -e 's/name="version" value=".*" type="string"/name="version" value="3.2.0" type="string"/g' {} \;
| Shell |
3D | OpenMS/OpenMS | tools/load_vcvarsall.sh | .sh | 2,294 | 69 | # This is a script for creating a "Visual Studio Command Line" in (git) bash on Windows
# It basically invokes the corresponding vcvarsall.bat (that is also called when opening a VS Command Line)
# and copies all the environment variables from it.
# You can source it or add this function to your ~/.bashrc for example
# call load_msenv [year] from a shell to load the x64 environment for the corresponding VS year.
# it will cache the variables in $HOME/.msenv${msversion_year}_bash. Delete it if you want to reload.
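# A minimal usage sketch (paths and the final check are illustrative):
#
#   source tools/load_vcvarsall.sh
#   load_msenv 2022    # load the x64 environment for Visual Studio 2022
#   which cl.exe       # the MSVC toolchain should now be on PATH
#
# Delete $HOME/.msenv_2022_bash to force the environment to be re-captured.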
function load_msenv() {
local msversion_year=2022
if [ $# -gt 0 ]; then
msversion_year=$1
fi
case $msversion_year in
2017)
msversion_prod=15
;;
2019)
msversion_prod=16
;;
2022)
msversion_prod=17
;;
*)
>&2 printf "Invalid version year. Supported are 2017, 2019 and 2022."
return 1
esac
msversion_prod_p1=$(($msversion_prod+1))
local VSWHERE='C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe'
installPath=$("$VSWHERE" -products '*' -version "[$msversion_prod,$msversion_prod_p1)" -property installationPath)
# TODO check if exists, print error otherwise
local vcvarsall="${installPath}\\VC\\Auxiliary\\Build\\vcvarsall.bat"
local OLDPATH=$PATH
local msenv="$HOME/.msenv_${msversion_year}_bash"
if [ ! -f "$msenv" ]; then
local msenvbatch="__print_ms_env.bat"
echo "@echo off" > "$msenvbatch"
echo "call \"${vcvarsall}\" x64" >> "$msenvbatch"
echo "set" >> "$msenvbatch"
cmd "/C $msenvbatch" > "$msenv.tmp"
rm -f "$msenvbatch"
grep -E '^PATH=' "$msenv.tmp" | \
sed \
-e 's/\(.*\)=\(.*\)/export \1="\2"/g' \
-e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
-e 's/\\/\//g' \
-e 's/;\//:\//g' \
> "$msenv"
# Don't mess with CL compilation env
grep -E '^(INCLUDE|LIB|LIBPATH)=' "$msenv.tmp" | \
sed \
-e 's/\(.*\)=\(.*\)/export \1="\2"/g' \
>> "$msenv"
rm "$msenv.tmp"
fi
source "$msenv"
export PATH="$PATH:$OLDPATH"
}
export -f load_msenv | Shell |
3D | OpenMS/OpenMS | tools/update_version_numbers.sh | .sh | 3,565 | 102 | #!/usr/bin/env bash
################################################################################
set -eu
set -o pipefail
################################################################################
usage() {
cat <<EOF
Usage: $(basename "$0") [options] major minor patch
-h This message
Update the OpenMS version number.
EOF
}
################################################################################
while getopts "h" o; do
case "${o}" in
h)
usage
exit
;;
*)
exit 1
;;
esac
done
shift $((OPTIND - 1))
if [ $# -ne 3 ]; then
echo >&2 "ERROR: please provide three numbers (use --help for more info)"
exit 1
fi
################################################################################
package_version_major=$1
package_version_minor=$2
package_version_patch=$3
package_version="$1.$2.$3"
echo "Setting version $package_version"
################################################################################
# update main cmakelist
sed -i -e "s#.*set(OPENMS_PACKAGE_VERSION_MAJOR.*#set(OPENMS_PACKAGE_VERSION_MAJOR \"$package_version_major\")#" CMakeLists.txt
sed -i -e "s#.*set(OPENMS_PACKAGE_VERSION_MINOR.*#set(OPENMS_PACKAGE_VERSION_MINOR \"$package_version_minor\")#" CMakeLists.txt
sed -i -e "s#.*set(OPENMS_PACKAGE_VERSION_PATCH.*#set(OPENMS_PACKAGE_VERSION_PATCH \"$package_version_patch\")#" CMakeLists.txt
# update version info test
sed -i -e "s#detail.version_major =.*#detail.version_major = $package_version_major;#" ./src/tests/class_tests/openms/source/VersionInfo_test.cpp
sed -i -e "s#detail.version_minor =.*#detail.version_minor = $package_version_minor;#" ./src/tests/class_tests/openms/source/VersionInfo_test.cpp
sed -i -e "s#detail.version_patch =.*#detail.version_patch = $package_version_patch;#" ./src/tests/class_tests/openms/source/VersionInfo_test.cpp
# update vcpkg.json
sed -i -e "s/\"version-string\": \".*\"/\"version-string\": \"$package_version\"/" vcpkg.json
# update test write ini out:
sed -i -e "s#<ITEM name=\"version\" value=\".*\" type=\"string\"#<ITEM name=\"version\" value=\"$package_version\" type=\"string\"#g" ./src/tests/topp/WRITE_INI_OUT.ini
# update INIUpdater version
sed -i -e "s#<ITEM name=\"version\" value=\".*\" type=\"string\"#<ITEM name=\"version\" value=\"$package_version\" type=\"string\"#g" ./src/tests/topp/INIUpdater_1_noupdate.toppas
# update INIs in tests topp:
find ./src/tests/topp/ \
-type f \
-name '*.ini' \
-exec grep -q "<ITEM name=\"version\" value=\".*\" type=\"string\"" {} \; \
-exec sed -i -e "s#<ITEM name=\"version\" value=\".*\" type=\"string\"#<ITEM name=\"version\" value=\"$package_version\" type=\"string\"#g" {} \;
# Update Changelog
#
# Create a banner with line endings that sed wants (add a backslash
# in front of each newline except the last).
section_header=$(
sed -Ee '$!s/$/\\/' <<<"
------------------------------------------------------------------------------------------
---- OpenMS ${package_version} (under development) ----
------------------------------------------------------------------------------------------
" # NOTE: this line should start with a single blank space!
)
# Get the line number of the first line starting with more than 10 "-"
line_num=$(
grep \
--extended-regexp \
--line-number \
--max-count=1 "^-{10,}" CHANGELOG |
cut -d: -f1
)
sed -i \
-e "${line_num}i $section_header" \
CHANGELOG
################################################################################
echo "done."
| Shell |
3D | OpenMS/OpenMS | tools/PythonCheckerLib.py | .py | 3,859 | 107 | from __future__ import print_function
def parse_pxd_file(path):
import os
from Cython.Compiler.CmdLine import parse_command_line
from Cython.Compiler.Main import create_default_resultobj, CompilationSource
from Cython.Compiler import Pipeline
from Cython.Compiler.Scanning import FileSourceDescriptor
from Cython.Compiler.Nodes import CEnumDefNode, CppClassNode, CTypeDefNode, CVarDefNode, CImportStatNode, CDefExternNode
# from Cython.Compiler.ExprNodes import *
options, sources = parse_command_line(["--cplus", path])
path = os.path.abspath(path)
basename = os.path.basename(path)
name, ext = os.path.splitext(basename)
source_desc = FileSourceDescriptor(path, basename)
source = CompilationSource(source_desc, name, os.getcwd())
result = create_default_resultobj(source, options)
context = options.create_context()
pipeline = Pipeline.create_pyx_pipeline(context, options, result)
context.setup_errors(options, result)
root = pipeline[0](source) # only parser
def iter_bodies(tree):
try:
for n in tree.body.stats[0].stats:
# cimports at head of file
yield n
except Exception:
pass
if hasattr(tree.body, "stats"):
for s in tree.body.stats:
if isinstance(s, CDefExternNode):
body = s.body
if hasattr(body, "stats"):
for node in body.stats:
yield node
else:
yield body
elif hasattr(tree.body, "body"):
body = tree.body.body
yield body
else:
raise Exception("parse_pxd_file failed: no valied .pxd file !")
lines = open(path).readlines()
def cimport(b, _, __):
print ("cimport", b.module_name, "as", b.as_name)
handlers = { CEnumDefNode : "",
CppClassNode : "",
CTypeDefNode : "",
CVarDefNode : "",
CImportStatNode : "",
}
# from autowrap.PXDParser import EnumDecl.parseTree, CppClassDecl.parseTree, CTypeDefDecl.parseTree, MethodOrAttributeDecl.parseTree
# handlers = { CEnumDefNode : EnumDecl.parseTree,
# CppClassNode : CppClassDecl.parseTree,
# CTypeDefNode : CTypeDefDecl.parseTree,
# CVarDefNode : MethodOrAttributeDecl.parseTree,
# CImportStatNode : cimport,
# }
result = []
for body in iter_bodies(root):
handler = handlers.get(type(body))
if handler is not None:
# L.info("parsed %s, handler=%s" % (body.__class__, handler.im_self))
result.append([ body, lines, path ])
else:
for node in getattr(body, "stats", []):
handler = handlers.get(type(node))
if handler is not None:
result.append([ node, lines, path ])
return result
def create_pxd_file_map(src_path):
# Searches through all pxd files and matches them to the corresponding
# OpenMS headers. Note that each header can be mapped to multiple pxd
# files:
#
# pxd_file_matching[ HeaderName ] = set( [PXDFile, PXDFile, ...] )
#
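# A hypothetical example of the resulting mapping (header name assumed):
#
#   mapping = create_pxd_file_map("/path/to/OpenMS")
#   mapping["OpenMS/KERNEL/MSSpectrum.h"]
#   # -> the set of pxd files whose 'cdef extern from "<...>"' line
#   #    references that header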
import glob, os, re
pxd_path = os.path.join(src_path, "src/pyOpenMS/pxds/")
pxds = glob.glob(pxd_path + "/*.pxd")
pxd_file_matching = {}
for pfile in pxds:
filename_rgx = re.compile("cdef extern from \"<([^\"]*)>\"", re.IGNORECASE)
filematch = [o.group(1) for o in filename_rgx.finditer(open(pfile).read()) ]
for fm in filematch:
res = pxd_file_matching.get(fm, set([]))
res.update([pfile])
pxd_file_matching[fm] = res
return pxd_file_matching
| Python |
3D | OpenMS/OpenMS | tools/update_header_dates.sh | .sh | 362 | 4 | #find . -type f | xargs sed -i 's/and Freie Universitaet Berlin 2002-present.and Freie Universitaet Berlin 2002-present.g'
## For all files and on MacOS:
find . -type f ! -path "./.git/*" -exec grep -q "Freie Universitaet Berlin 2002-20" {} \; -exec sed -i '' -e 's/and Freie Universitaet Berlin 2002-20../and Freie Universitaet Berlin 2002-present/g' {} \;
| Shell |
3D | OpenMS/OpenMS | tools/overwriteTOPPTestFiles.sh | .sh | 541 | 12 | #!/bin/bash
# Use in your build folder with the test regex (as for ctest -R) as argument like: ../tools/overwriteTOPPTestFiles.sh TOPP_OpenSwathAnalyzer
# Runs the tests with ctest, checks for failed ones and copies the (temporary) result file from the test OVER the
# expected file from the OpenMS test sources.
ctest -V -R "$1" > tmp.tst.log
grep -e "^[0-9]*: diff" tmp.tst.log | sed 's/^.*diff/cp/g' > copies_to_perform.txt
cat copies_to_perform.txt | while read -r line
do
echo "$line"
$line
done
rm tmp.tst.log copies_to_perform.txt | Shell |
3D | OpenMS/OpenMS | tools/ci/deps-macos.sh | .sh | 1,228 | 62 | #!/usr/bin/env bash
set -eu
set -o pipefail
# Unfortunately GitHub's macOS runner already has Python installed so
# we need to tell brew to overwrite the existing links. The following
# function will be called when the brew commands below are executed.
# It then calls the real brew command.
function brew() {
local action=$1
shift
# Bash on macOS doesn't allow using empty arrays. Therefore we put
# the action name in the flags array so it always has at least one
# element. This is also why we install bash below.
local -a flags=("$action")
if [ "$action" = "install" ]; then
flags+=("--overwrite")
fi
command brew "${flags[@]}" "$@"
}
# Code between the following doxygen markers are included in the
# public-facing OpenMS installation instructions.
# [installation_documentation]
# Update the package index:
brew update
# Required dependencies:
brew install \
python \
ccache \
autoconf \
automake \
libtool \
ninja \
libomp \
libsvm \
xerces-c \
boost \
eigen \
sqlite \
coinutils \
cbc \
cgl \
clp \
qtbase \
qtsvg \
apache-arrow \
bash
# Optional dependencies:
brew install \
doxygen \
ghostscript \
graphviz
# [installation_documentation]
| Shell |
3D | OpenMS/OpenMS | tools/ci/capture-env.sh | .sh | 3,162 | 141 | #!/usr/bin/env bash
################################################################################
set -eu
set -o pipefail
################################################################################
vars_to_cache=(
"ADDRESS_SANITIZER"
"CC"
"CXX"
"CMAKE_CXX_COMPILER"
"CMAKE_CXX_COMPILER_LAUNCHER"
"CMAKE_C_COMPILER_LAUNCHER"
"CFLAGS"
"CXXFLAGS"
"CMAKE_PREFIX_PATH"
"CMAKE_BUILD_TYPE"
"CMAKE_GENERATOR_PLATFORM"
"Boost_DEBUG"
"BOOST_USE_STATIC"
"OPENMS_CONTRIB_LIBS"
"ENABLE_CLASS_TESTING"
"ENABLE_GCC_WERROR"
"ENABLE_STYLE_TESTING"
"ENABLE_TOPP_TESTING"
"ENABLE_PIPELINE_TESTING"
"ENABLE_DOCS"
"ENABLE_CWL_GENERATION"
"ENABLE_TUTORIALS"
"ENABLE_UPDATE_CHECK"
"MT_ENABLE_OPENMP"
"NO_DEPENDENCIES"
"SEARCH_ENGINES_DIRECTORY"
"SIGNING_IDENTITY"
"SIGNING_EMAIL"
"CPACK_PRODUCTBUILD_IDENTITY_NAME"
"PACKAGE_TYPE"
"PYOPENMS"
"PY_MEMLEAK_DISABLE"
"PY_NO_OPTIMIZATION"
"PY_NO_OUTPUT"
"PY_NUM_MODULES"
"PY_NUM_THREADS"
"Python_ROOT_DIR"
"Python_FIND_STRATEGY"
"WITH_GUI"
"WITH_PARQUET"
"WITH_THERMORAWFILEPARSER_TEST"
"COMPILE_PXDS"
"USE_EXTERNAL_JSON"
)
################################################################################
option_verbose=0
################################################################################
function usage() {
cat <<EOF
Usage: $(basename "$0") [options] file
-h This message
-v Verbose output
Write environment variables to file.
EOF
}
################################################################################
function write_file() {
local file=$1
mkdir -p "$(dirname "$file")"
if [ -e "$file" ]; then
rm -f "$file"
fi
for var in "${vars_to_cache[@]}"; do
# Indirection: get value, default to empty
val="${!var:-}"
# Sanitize: trim CR, LF, and trailing whitespace
val_sane="$(printf '%s' "$val" | tr -d '\r\n' | sed 's/[[:space:]]*$//')"
if [ -n "$val_sane" ]; then
if [ "$option_verbose" -eq 1 ]; then
# Redact sensitive variables
case "$var" in
SIGNING_EMAIL|SIGNING_IDENTITY|CPACK_PRODUCTBUILD_IDENTITY_NAME)
# Mask: show first and last char, rest as asterisks (if length > 2)
len=${#val_sane}
if [ "$len" -le 2 ]; then
masked="$val_sane"
else
masked="${val_sane:0:1}***${val_sane: -1}"
fi
echo "Found $var with value $masked (redacted)"
;;
*)
echo "Found $var with value $val_sane"
;;
esac
fi
printf '%s:STRING=%s\n' "$var" "$val_sane" >>"$file"
fi
done
}
################################################################################
function main() {
while getopts "hv" o; do
case "${o}" in
h)
usage
exit
;;
v)
option_verbose=1
;;
*)
exit 1
;;
esac
done
shift $((OPTIND - 1))
if [ $# -ne 1 ]; then
echo >&2 "ERROR: missing file argument, see -h"
exit 1
fi
write_file "$1"
}
################################################################################
main "$@"
| Shell |
3D | OpenMS/OpenMS | tools/ci/deps-ubuntu.sh | .sh | 1,481 | 64 | #!/usr/bin/env bash
set -eu
set -o pipefail
# Code between the following doxygen markers are included in the
# public-facing OpenMS installation instructions.
# [installation_documentation]
# Add "universe" and update:
sudo add-apt-repository universe
sudo apt update
# Required dependencies:
sudo apt-get -qq install -y \
build-essential \
cmake \
autoconf \
patch \
libtool \
git \
automake \
ninja-build \
xvfb \
ccache \
qt6-base-dev \
libqt6svg6-dev \
libqt6opengl6-dev \
libqt6openglwidgets6 \
libgl-dev \
libeigen3-dev \
libboost-random-dev \
libboost-regex-dev \
libboost-iostreams-dev \
libboost-date-time-dev \
libboost-math-dev \
libxerces-c-dev \
zlib1g-dev \
libsvm-dev \
libbz2-dev \
coinor-libcoinmp-dev \
libhdf5-dev \
libsqlite3-dev \
libsqlitecpp-dev \
nlohmann-json3-dev \
libsimde-dev
sudo apt update
sudo apt-get install -y -V ca-certificates lsb-release wget
wget https://packages.apache.org/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb
sudo apt update
sudo apt-get install -y -V ./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb
sudo apt update
sudo apt-get install -y --no-install-recommends \
libarrow-dev \
libparquet-dev
# Optional dependencies:
sudo apt-get -qq install -y \
doxygen \
ghostscript \
graphviz
# [installation_documentation]
| Shell |
3D | OpenMS/OpenMS | tools/scripts/create_im_with_swathIm.py | .py | 7,952 | 230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
SPDX-License-Identifier: BSD-3-Clause
--------------------------------------------------------------------------
$Maintainer: Hannes Roest, Joshua Charkow $
$Authors: Hannes Roest, Joshua Charkow $
--------------------------------------------------------------------------
"""
from __future__ import division
from __future__ import print_function
# Create simulated ion mobility scans for testing
from pyopenms import *
import pyopenms
exp = MSExperiment()
##################################### PARAMETERS ##########################################
NR_RT_SAMPLES = 50
IM_BINS = (300, 700)
NR_PEAKS = 5
# create MS2 precursor across IM for a single spectrum
# allmz, allint, allim = lists storing all of the information for the spectrum
# imCenterIdx = idx IM value, will be divided by 500
# imWidth = width of IM in idx units
# base_int = base intensity for the spectrum
# baseFragmentMz = lowest fragmentMz
# nr_peaks = number of fragment peaks to create
# imWinLim = window limits of IM; do not add peaks outside of this window
def createMS2Precursor(allmz, allint, allim, imCenterIdx, imWidth, base_int, baseFragmentMz = 100, nr_peaks = NR_PEAKS, im_bins=IM_BINS, imWinLim=None):
    if imWinLim is None:
        # no explicit IM window: accept all IM bins
        imWinLim = (float('-inf'), float('inf'))
    for im_idx in range(*im_bins):
        for i in range(nr_peaks):
            if (imCenterIdx - imWidth) < im_idx < (imCenterIdx + imWidth) and (imWinLim[0] < im_idx < imWinLim[1]):
                apex_dist = abs(imCenterIdx - im_idx)
                p = Peak1D()
                p.setMZ(baseFragmentMz + i)
                p.setIntensity(base_int * (i + 1) - base_int * (i + 1) * apex_dist / imWidth)
                allmz.append(p.getMZ())
                allint.append(p.getIntensity())
                allim.append(im_idx / 500.0)
# add values to a spectrum across IM, corresponding to one precursor
# sp = input spectrum
# fda = input float data array
# mz = m/z of the precursor
# im_idx = IM of the precursor (in idx units; will be divided by 500)
# base_intensity = intensity to base other intensity calculations on
# im_offset_idx = spacing (in idx units) between the IM values of
#                 consecutive peaks, starting at im_idx - im_offset_idx
# nr_peaks = number of peaks to create
def addMS1Precursor(sp, fda, mz, im_idx, base_intensity, im_offset_idx=2, nr_peaks=10):
for i in range(nr_peaks):
p = Peak1D()
p.setMZ(mz)
p.setIntensity(base_intensity+ (i-5))
sp.push_back(p)
fda.push_back( (im_idx - im_offset_idx + i*im_offset_idx)/500)
########### 1. Create MS1 spectra #####################
## create peaks from 100m/z to 200m/z
for rt_idx in range(NR_RT_SAMPLES):
# compute base intensity
base_int = 100 - abs(20 - rt_idx)*(100)/25.0 # shift the precursor slightly
base_int = max(5, base_int)
sp = MSSpectrum()
sp.setRT(rt_idx)
sp.setMSLevel(1)
fda = pyopenms.FloatDataArray()
fda.setName("Ion Mobility")
fda.resize(100)
for i in range(100):
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int+i)
sp.push_back(p)
fda[i] = 300/500
# add precursors that are in the library
addMS1Precursor(sp, fda, 412.502, 350, base_int)
addMS1Precursor(sp, fda, 417.502, 450, base_int, im_offset_idx=5)
addMS1Precursor(sp, fda, 422.502, 550, base_int)
sp.setFloatDataArrays([fda])
exp.addSpectrum(sp)
###### CREATE MS2 SPECTRA FOR FIRST SWATH ##########
rt_offset = 0.2
for rt_idx in range(NR_RT_SAMPLES):
base_int = 100 - abs(25 - rt_idx)*(100)/25.0
base_int_second = 100 - abs(10 - rt_idx)*(100)/40.0
allmz = []
allint = []
allim = []
# peaks of a precursor at 412.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
    # and IM apex at index 350 (IM value 0.7)
createMS2Precursor(allmz, allint, allim, imCenterIdx=350, imWidth=10, base_int=base_int, baseFragmentMz=100)
# peaks of a precursor at 417.5 m/z : 100.01, 101.01, 102.01, .. 100 + NR_PEAKS. Slight offset so chromatograms can be sorted unambiguously
    # and IM apex at index 450 (IM value 0.9)
createMS2Precursor(allmz, allint, allim, imCenterIdx=450, imWidth=20, base_int=base_int, imWinLim=(300, 450), baseFragmentMz=100.01)
fda = pyopenms.FloatDataArray()
fda.setName("Ion Mobility")
fda.resize(len(allmz))
for k,val in enumerate(allim):
fda[k] = val
### create spectrum and set metaData
sframe = pyopenms.MSSpectrum()
sframe.setMSLevel(2)
sframe.setRT(rt_idx + rt_offset)
    ## set IM window boundaries; the precursor at m/z 417.5 (IM apex index 450) sits at the upper edge of this window and should not be extracted from it
sframe.setMetaValue('ion mobility lower limit', 300/500)
sframe.setMetaValue('ion mobility upper limit', 450/500)
p = pyopenms.Precursor()
center = 412.5
width = 25
p.setMZ(center)
p.setIsolationWindowUpperOffset(width / 2.0)
p.setIsolationWindowLowerOffset(width / 2.0)
sframe.setPrecursors([p])
# store data in spectrum
sframe.set_peaks( (allmz, allint) )
sframe.setFloatDataArrays([fda])
sframe.sortByPosition()
exp.addSpectrum(sframe)
############# CREATE MS2 SPECTRA FOR 2ND SWATH #########################
rt_offset = 0.3
for rt_idx in range(NR_RT_SAMPLES):
base_int = 100 - abs(25 - rt_idx)*(100)/25.0
base_int_second = 100 - abs(10 - rt_idx)*(100)/40.0
allmz = []
allint = []
allim = []
# peaks of a precursor at 412.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
    # and IM apex at index 350 (IM value 0.7)
createMS2Precursor(allmz, allint, allim, imCenterIdx=350, imWidth=10, base_int=base_int, imWinLim=(350, 600))
# peaks of a precursor at 417.5 m/z : 100.01, 101.01, 102.01, .. 100.01 + NR_PEAKS
    # and IM apex at index 450 (IM value 0.9)
createMS2Precursor(allmz, allint, allim, imCenterIdx=450, imWidth=20, base_int=base_int, imWinLim=(350, 600), baseFragmentMz=100.01)
# peaks of a precursor at 422.5 m/z : 100.02, 101.02, 102.02, .. 100.02 + NR_PEAKS
    # and IM apex at index 550 (IM value 1.1)
createMS2Precursor(allmz, allint, allim, imCenterIdx=550, imWidth=10, base_int=base_int, imWinLim=(350, 600), baseFragmentMz=100.02)
fda = pyopenms.FloatDataArray()
fda.setName("Ion Mobility")
fda.resize(len(allmz))
for k,val in enumerate(allim):
fda[k] = val
### create spectrum and set metaData
sframe = pyopenms.MSSpectrum()
sframe.setMSLevel(2)
sframe.setRT(rt_idx + rt_offset)
    ## set IM window boundaries; the precursor at m/z 412.5 (IM apex index 350) sits at the lower edge of this window and should not be extracted from it
sframe.setMetaValue('ion mobility lower limit', 350/500)
sframe.setMetaValue('ion mobility upper limit', 600/500)
p = pyopenms.Precursor()
center = 412.5
width = 25
p.setMZ(center)
p.setIsolationWindowUpperOffset(width / 2.0)
p.setIsolationWindowLowerOffset(width / 2.0)
sframe.setPrecursors([p])
# store data in spectrum
sframe.set_peaks( (allmz, allint) )
sframe.setFloatDataArrays([fda])
sframe.sortByPosition()
exp.addSpectrum(sframe)
############# EXPORT MZML FILE ##########################
f = MzMLFile()
pf = f.getOptions()
pf.setCompression(True)
f.setOptions(pf)
exp.sortSpectra()
f.store('OpenSwathWorkflow_23_input.mzML', exp)
| Python |
3D | OpenMS/OpenMS | tools/scripts/git_showdiff.sh | .sh | 396 | 16 |
echo "Usage: git_showdiff.sh REV1 REV2"
git diff "$1" "$2" --name-only \
| grep -v '^src/tests/topp/' \
| grep -v '^share/OpenMS' \
| grep -v '^src/tests/class_tests/openms/data/' \
| grep -v '^cmake/' \
| grep -v '^src/openswathalgo/thirdparty' \
| grep -v '^src/openms/thirdparty/' \
| grep -v '^tools' \
| xargs git diff --shortstat "$1" "$2" --
| Shell |
3D | OpenMS/OpenMS | tools/scripts/check_for_unused_testdata.py | .py | 3,765 | 96 | import os
import subprocess
import glob
# Script that lists all test files in the topp folder that are:
# 1. tracked by git, and
# 2. not referenced in the topp tests (CMakeLists.txt) or the class tests.
# After careful checking, these files can potentially be removed.
def list_files_in_directory(start_path):
""" Returns a list of all files in a directory (no recurse). """
files_list = []
for file in os.listdir(start_path):
full_path = os.path.join(start_path, file)
if os.path.isfile(full_path):
            files_list.append(full_path)
return files_list
def file_contains_text(file_path, text):
""" Checks if the given text is in the file. """
try:
with open(file_path, 'r', encoding='utf-8') as file:
return text in file.read()
except UnicodeDecodeError:
return False
def resolve_files(file_list):
""" Resolves a list of file paths and wildcard patterns to actual file paths. """
resolved_files = []
for file_path in file_list:
# Check if the file path contains a wildcard
if '*' in file_path:
# Expand the wildcard pattern to actual file paths
expanded_files = glob.glob(file_path, recursive=True)
resolved_files.extend(expanded_files)
else:
# If no wildcard, just add the file path as is
if os.path.exists(file_path):
resolved_files.append(file_path)
else:
print(f"Warning: The file '{file_path}' does not exist.")
return resolved_files
def main(test_data_dir, source_files_to_check):
test_files = list_files_in_directory(test_data_dir)
source_files = resolve_files(source_files_to_check) # glob wildcards
test_files_not_in_sources = []
for test_file in test_files:
test_file_name = os.path.basename(test_file)
found = False
for source_file in source_files:
if file_contains_text(source_file, test_file_name):
found = True
break
if not found:
test_files_not_in_sources.append(test_file)
#print("Test files not found in any source file:")
#for file in test_files_not_in_sources:
# print(file)
print("Test files tracked by GIT but not found in any source file:")
tracked = filter_tracked_files(test_files_not_in_sources) # print tracked files not referenced in sources
for file in tracked:
print(file)
def get_tracked_files():
"""Returns a set of tracked files in the current Git repository."""
try:
# Using git ls-files to list tracked files
result = subprocess.run(['git', 'ls-files', test_data_directory], capture_output=True, text=True)
result.check_returncode() # Ensures that the git command didn't fail
        # split the output into lines and keep only the file basenames
        tracked_files = {os.path.basename(f) for f in result.stdout.splitlines()}
return tracked_files
except subprocess.CalledProcessError as e:
print("Failed to run git command:", e)
return set()
def filter_tracked_files(file_list):
"""Filters the given list of files to find which are tracked."""
tracked_files = get_tracked_files()
# Find the intersection of our file list with the tracked files
return [file for file in file_list if os.path.basename(file) in tracked_files]
if __name__ == "__main__":
    # file names in the test data directory will be checked for existence in the source files
test_data_directory = '../../src/tests/topp'
source_files_to_check = [ '../../src/tests/topp/CMakeLists.txt', '../../src/tests/class_tests/openms/source/*.cpp']
main(test_data_directory, source_files_to_check)
| Python |
3D | OpenMS/OpenMS | tools/scripts/create_testdata_flqt.py | .py | 1,133 | 34 |
import pyopenms
"""
Producing the test data for TOPP_FeatureLinkerUnlabeledQT_5 and TOPP_FeatureLinkerUnlabeledQT_6
"""
fmaps = [ pyopenms.FeatureMap() for i in range(3)]
pepids = []
pepseq = ["PEPTIDEA", "PEPTIDEK", "PEPTIDER"]
for s in pepseq:
pepid = pyopenms.PeptideIdentification()
hit = pyopenms.PeptideHit()
hit.setSequence(pyopenms.AASequence.fromString(s, True))
pepid.insertHit(hit)
pepid.setIdentifier("Protein0")
pepids.append(pepid)
protid = pyopenms.ProteinIdentification()
protid.setIdentifier("Protein0")
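# all three feature maps reference the same protein identification run ("Protein0")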
for i,fmap in enumerate(fmaps):
fmap.setProteinIdentifications( [protid])
# add 3 features to each map, but with a twist (adding different peptide ids to different maps)
for k in range(3):
f = pyopenms.Feature()
f.setRT(300 + k*100 + i*10)
f.setMZ(500 + k*0.001 + i*0.01)
f.setIntensity(500 + i*100)
f.setMetaValue("sequence", pepseq[ (i+k) % 3]) # easier viewing in TOPPView
f.setPeptideIdentifications( [pepids[(i+k) % 3]] )
fmap.push_back(f)
pyopenms.FeatureXMLFile().store("output_%s.featureXML" % i, fmap)
| Python |
3D | OpenMS/OpenMS | tools/scripts/create_im.py | .py | 4,192 | 147 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
# SPDX-License-Identifier: BSD-3-Clause
#
# --------------------------------------------------------------------------
# $Maintainer: Hannes Roest $
# $Authors: Hannes Roest $
# --------------------------------------------------------------------------
"""
from __future__ import division
from __future__ import print_function
# Create simulated ion mobility scans for testing
from pyopenms import *
exp = MSExperiment()
# print(dir(exp))
NR_RT_SAMPLES = 50
NR_IM_BINS = 300
NR_PEAKS = 5
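# NOTE: in the MS2 spectra below, ion mobility values are stored as
# im_idx / 500.0; the MS1 float data array holds raw index-like values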
# Create MS1 spectra
for rt_idx in range(NR_RT_SAMPLES):
sp = MSSpectrum()
sp.setRT(rt_idx)
sp.setMSLevel(1)
base_int = 100 - abs(20 - rt_idx)*(100)/25.0 # shift the precursor slightly
base_int = max(5, base_int)
fda = pyopenms.FloatDataArray()
fda.setName("Ion Mobility")
fda.resize(100)
for i in range(100):
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int+i)
sp.push_back(p)
fda[i] = 10
for i in range(10):
p = Peak1D()
p.setMZ(412.502)
p.setIntensity(base_int+ (i-5))
sp.push_back(p)
fda.push_back( 99 + (i-5) )
for i in range(10):
p = Peak1D()
p.setMZ(417.502)
p.setIntensity(base_int+ (i-5))
sp.push_back(p)
fda.push_back( 152 + (i-5) )
sp.setFloatDataArrays([fda])
exp.addSpectrum(sp)
# Create MS2 spectra
for rt_idx in range(NR_RT_SAMPLES):
# Base intensity is a single peak at 25 RT with 100 *i intensity spread across ion mobility scans
# Second base intensity is a single peak at 10 RT with 100 *i intensity spread across ion mobility scans
base_int = 100 - abs(25 - rt_idx)*(100)/25.0
base_int_second = 100 - abs(10 - rt_idx)*(100)/40.0
print("base int", base_int, abs(25 - rt_idx)*(100)/25.0 )
allmz = []
allint = []
allim = []
for im_idx in range(NR_IM_BINS):
        # peaks for all IM bins of this RT are collected in the lists above
        # and stored in a single frame ('sframe') after this loop
# peaks of a precursor at 412.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
# and ion mobility 100
for i in range(NR_PEAKS):
if im_idx > 90 and im_idx < 90 + 20:
apex_dist = abs( 100 - im_idx)
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int * (i + 1) - base_int * (i + 1) * apex_dist / 10.0)
allmz.append(p.getMZ())
allint.append(p.getIntensity())
allim.append( im_idx / 500.0)
# peaks of a precursor at 417.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
# and ion mobility 150
for i in range(NR_PEAKS):
if im_idx > 130 and im_idx < 130 + 40:
apex_dist = abs( 150 - im_idx)
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int * (i + 1) - base_int * (i + 1) * apex_dist / 20.0)
allmz.append(p.getMZ())
allint.append(p.getIntensity())
allim.append( im_idx / 500.0)
mz = allmz
intens = allint
ims = allim
# print(mz, intens)
fda = pyopenms.FloatDataArray()
fda.setName("Ion Mobility")
fda.resize(len(mz))
for k,val in enumerate(ims):
fda[k] = val
sframe = pyopenms.MSSpectrum()
sframe.setMSLevel(2)
sframe.setRT(rt_idx+0.2)
sframe.setFloatDataArrays([fda])
p = pyopenms.Precursor()
    center = 412.5
    width = 25
    p.setMZ(center)
    p.setIsolationWindowUpperOffset(width / 2.0)
    p.setIsolationWindowLowerOffset(width / 2.0)
sframe.setPrecursors([p])
sframe.set_peaks( (mz, intens) )
sframe.sortByPosition()
exp.addSpectrum(sframe)
f = MzMLFile()
pf = f.getOptions()
pf.setCompression(True)
f.setOptions(pf)
exp.sortSpectra()
f.store('output.mzML', exp)
| Python |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_SavitzkyGolayFilter.cpp | .cpp | 1,143 | 39 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/PROCESSING/SMOOTHING/SavitzkyGolayFilter.h>
#include <OpenMS/PROCESSING/RESAMPLING/LinearResampler.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto file_dta = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_SavitzkyGolayFilter.dta");
// A DTA file always has exactly one Spectrum, so we get that
MSSpectrum spectrum;
// Load the dta file into the spectrum
FileHandler().loadSpectrum(file_dta, spectrum);
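  // The Savitzky-Golay filter assumes equally spaced data points, so the raw
  // spectrum is first resampled onto a uniform m/z grid: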
LinearResampler lr;
Param param_lr;
param_lr.setValue("spacing", 0.01);
lr.setParameters(param_lr);
lr.raster(spectrum);
SavitzkyGolayFilter sg;
Param param_sg;
param_sg.setValue("frame_length", 21);
param_sg.setValue("polynomial_order", 3);
sg.setParameters(param_sg);
sg.filter(spectrum);
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_FileIO.cpp | .cpp | 2,339 | 54 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/FeatureMap.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto file_mzXML = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_FileIO.mzXML");
// temporary data storage
PeakMap map;
// convert MzXML to MzML. Internally we use FileHandler to do the actual work.
// Here we limit the input type to be MzXML only
FileHandler().loadExperiment(file_mzXML, map, {FileTypes::MZXML});
FileHandler().storeExperiment("Tutorial_FileIO.mzML", map, {FileTypes::MZML});
// The FileHandler object can also hold options for how to load the file
FileHandler f;
PeakFileOptions opts;
// Here we set the MZ range to load to 100-200
opts.setMZRange({100, 200});
f.setOptions(opts);
f.loadExperiment(file_mzXML, map, {FileTypes::MZXML});
// we can also load an experiment from a file without any restrictions on the file type:
FileHandler().loadExperiment(File::path(file_mzXML) + "/Tutorial_Spectrum1D.dta", map);
// if we want to allow all types that can store MS2 data we can do the following:
FileHandler().loadExperiment(file_mzXML, map,
FileTypeList::typesWithProperties({FileTypes::FileProperties::PROVIDES_EXPERIMENT}));
// The curly braces can contain multiple file properties. The FileTypeList that is created is the intersection of these properties
// so: FileTypeList::typesWithProperties({FileTypes::FileProperties::PROVIDES_EXPERIMENT, FileTypes::FileProperties::READABLE})
// returns only fileTypes which can store both MS1 and MS2 spectra
// We use various FileHandler functions to load other types.
FeatureMap feat;
FileHandler().loadFeatures(File::path(file_mzXML) + "/Tutorial_Labeled.featureXML", feat);
// If we try to load something from a file that can't store that info (for example trying to get an experiment from an idXML file)
// An error gets thrown at run time. Check out @p FileHandler class for more info
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MetaInfo.cpp | .cpp | 709 | 28 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/METADATA/MetaInfoInterface.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
Int main()
{
MetaInfoInterface info;
// insert meta data
info.setMetaValue("color", String("#ff0000"));
info.setMetaValue("id", 112131415);
// access id by index
UInt id_index = info.metaRegistry().getIndex("id");
cout << "id : " << (UInt)(info.getMetaValue(id_index)) << endl;
// access color by name
cout << "color: " << (String)(info.getMetaValue("color")) << endl;
return 0;
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_GUI_Plot1D.cpp | .cpp | 1,404 | 39 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/OnDiscMSExperiment.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/VISUAL/LayerDataBase.h>
#include <OpenMS/VISUAL/Plot1DWidget.h>
#include <OpenMS/VISUAL/Plot2DWidget.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <QApplication>
#include <memory>
using namespace OpenMS;
using namespace std;
Int main(int argc, const char** argv)
{
String tutorial_data_path(OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_Spectrum1D.dta"));
QApplication app(argc, const_cast<char**>(argv));
auto exp_sptr = std::make_shared<AnnotatedMSRun>();
MSSpectrum spec;
// demonstrating how to load a single spectrum from file formats which only contain a single spec
// alternatively: use FileHandler().loadExperiment() if you need an experiment anyway
FileHandler().loadSpectrum(tutorial_data_path, spec, {FileTypes::DTA});
exp_sptr->getMSExperiment().addSpectrum(spec);
LayerDataBase::ODExperimentSharedPtrType on_disc_exp_sptr(new OnDiscMSExperiment());
Plot1DWidget widget(Param(), DIM::Y, nullptr);
widget.canvas()->addPeakLayer(exp_sptr, on_disc_exp_sptr);
widget.show();
return app.exec();
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_GUI_ParamEditor.cpp | .cpp | 929 | 37 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/VISUAL/ParamEditor.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/FORMAT/ParamXMLFile.h>
#include <OpenMS/SYSTEM/File.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
#include <QApplication>
using namespace OpenMS;
using namespace std;
Int main(int argc, const char** argv)
{
String tutorial_data_path(OPENMS_DOC_PATH + String("/code_examples/"));
QApplication app(argc, const_cast<char**>(argv));
Param param;
ParamXMLFile paramFile;
paramFile.load(tutorial_data_path + "/data/Tutorial_ParamEditor.ini", param);
ParamEditor editor(nullptr);
editor.load(param);
editor.show();
app.exec();
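  // write any edits made in the GUI back into 'param' before saving it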
editor.store();
paramFile.store("Tutorial_ParamEditor_out.ini", param);
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Unlabeled.cpp | .cpp | 1,313 | 40 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/FeatureGroupingAlgorithmUnlabeled.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto tutorial_data_path = OPENMS_DOC_PATH + String("/code_examples/");
vector<FeatureMap > maps;
maps.resize(2);
FileHandler feature_file;
feature_file.loadFeatures(tutorial_data_path + "/data/Tutorial_Unlabeled_1.featureXML", maps[0]);
feature_file.loadFeatures(tutorial_data_path + "/data/Tutorial_Unlabeled_2.featureXML", maps[1]);
ConsensusMap out;
out.getColumnHeaders()[0].filename = "/data/Tutorial_Unlabeled_1.mzML";
out.getColumnHeaders()[0].size = maps[0].size();
out.getColumnHeaders()[1].filename = "/data/Tutorial_Unlabeled_2.mzML";
out.getColumnHeaders()[1].size = maps[1].size();
FeatureGroupingAlgorithmUnlabeled algorithm;
// ... set parameters
algorithm.group(maps, out);
FileHandler consensus_file;
consensus_file.storeConsensusFeatures("Tutorial_Unlabeled.consensusXML", out);
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_DPosition.cpp | .cpp | 837 | 36 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/DATASTRUCTURES/DPosition.h>
#include <iostream>
using namespace OpenMS;
Int main()
{
DPosition<2> pos {-8.15, 47.11};
static_assert(pos.size() == 2);
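  // size() and DIMENSION are compile-time constants, which is why they can
  // be used in constant expressions such as the static_assert above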
std::cout << "largest possible value: " << DPosition<2>::maxPositive() << '\n';
// make values in all dimensions positive and print
std::cout << "abs: " << pos.abs() << '\n';
// manipulate individual dimensions
pos[0] = -3.15;
pos[1] = 7.11;
for (Size i = 0; i < pos.DIMENSION; ++i)
{
std::cout << "Dimension " << i << ": " << pos[i] << std::endl;
}
// same thing
int i = 0;
for (const auto e : pos)
{
std::cout << "Dimension " << i++ << ": " << e << std::endl;
}
return 0;
} //end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Template.cpp | .cpp | 2,318 | 93 | //! [doxygen_snippet_Template]
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
// --------------------------------------------------------------------------
// $Maintainer: Maintainer $
// $Authors: Author1, Author2 $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
using namespace OpenMS;
using namespace std;
//-------------------------------------------------------------
// Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_NewTool
@brief Template for a new Tool
This tool can be used for scientific stuff.
And more scientific applications.
<B>The command line parameters of this tool are:</B>
@verbinclude TOPP_NewTool.cli
<B>INI file documentation of this tool:</B>
@htmlinclude TOPP_NewTool.html
*/
// We do not want this class to show up in the docu:
/// @cond TOPPCLASSES
class TOPPNewTool :
public TOPPBase
{
public:
TOPPNewTool() :
TOPPBase("NewTool", "Template for Tool creation", false)
{
}
protected:
// this function will be used to register the tool parameters
// it gets automatically called on tool execution
void registerOptionsAndFlags_()
{
}
// the main_ function is called after all parameters are read
ExitCodes main_(int, const char **)
{
//-------------------------------------------------------------
// parsing parameters
//-------------------------------------------------------------
//-------------------------------------------------------------
// reading input
//-------------------------------------------------------------
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
//-------------------------------------------------------------
// writing output
//-------------------------------------------------------------
return ExitCodes::EXECUTION_OK;
}
};
// the actual main function needed to create an executable
int main(int argc, const char ** argv)
{
TOPPNewTool tool;
return tool.main(argc, argv);
}
/// @endcond
//! [doxygen_snippet_Template]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_FeatureMap.cpp | .cpp | 1,033 | 41 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_FeatureMap]
#include <OpenMS/KERNEL/FeatureMap.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
// Insert of two features into a map and iterate over the features.
FeatureMap map;
Feature feature;
feature.setRT(15.0);
feature.setMZ(571.3);
map.push_back(feature); //append feature 1
feature.setRT(23.3);
feature.setMZ(1311.3);
map.push_back(feature); //append feature 2
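  // note: push_back copies the feature, which is why the same local object
  // can be reused for both insertions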
// Iteration over FeatureMap
for (auto& f : map)
{
cout << f.getRT() << " - " << f.getMZ() << endl;
}
// Calculate and output the ranges
map.updateRanges();
cout << "Int: " << map.getMinIntensity() << " - " << map.getMaxIntensity() << endl;
cout << "RT: " << map.getMinRT() << " - " << map.getMaxRT() << endl;
cout << "m/z: " << map.getMinMZ() << " - " << map.getMaxMZ() << endl;
} //end of main
//! [doxygen_snippet_FeatureMap]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MapAlignment.cpp | .cpp | 1,279 | 40 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
#include <OpenMS/ANALYSIS/MAPMATCHING/MapAlignmentAlgorithmPoseClustering.h>
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto tutorial_data_path = OPENMS_DOC_PATH + String("/code_examples/");
FeatureMap reference;
FeatureMap toAlign;
FileHandler xml_file;
xml_file.loadFeatures(tutorial_data_path + "/data/Tutorial_MapAlignment_1.featureXML", reference);
xml_file.loadFeatures(tutorial_data_path + "/data/Tutorial_MapAlignment_2.featureXML", toAlign);
// create map alignment algorithm
MapAlignmentAlgorithmPoseClustering algorithm;
// ... set parameters
algorithm.setReference(reference);
// create object for the computed transformation
TransformationDescription transformation;
// align
algorithm.align(toAlign, transformation);
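  // the computed transformation could also be applied to other data from the
  // same run (e.g. raw spectra); see the MapAlignmentTransformer class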
// store results
xml_file.storeFeatures("Tutorial_MapAlignment_1.featureXML", reference);
xml_file.storeFeatures("Tutorial_MapAlignment_2.featureXML", toAlign);
} // end of main
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_MSExperiment.cpp | .cpp | 3,212 | 90 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//
//! [doxygen_snippet_MSExperiment]
#include <OpenMS/CONCEPT/Types.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/SYSTEM/File.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
// create a peak map containing 4 dummy spectra and peaks
MSExperiment exp;
// The following examples creates a MSExperiment containing four MSSpectrum instances.
for (Size i = 0; i < 4; ++i)
{
MSSpectrum spectrum;
spectrum.setRT(i);
spectrum.setMSLevel(1);
for (float mz = 500.0; mz <= 900; mz += 100.0)
{
Peak1D peak;
peak.setMZ(mz + i);
spectrum.push_back(peak);
}
exp.addSpectrum(spectrum);
}
  // Iterate over the RT range (2, 3) and the m/z range (603, 802) and print the peak positions.
for (auto it = exp.areaBegin(2.0, 3.0, 603.0, 802.0); it != exp.areaEnd(); ++it)
{
cout << it.getRT() << " - " << it->getMZ() << endl;
}
// Iteration over all peaks in the experiment.
// Output: RT, m/z, and intensity
// Note that the retention time is stored in the spectrum (not in the peak object)
for (auto s_it = exp.begin(); s_it != exp.end(); ++s_it)
{
for (auto p_it = s_it->begin(); p_it != s_it->end(); ++p_it)
{
cout << s_it->getRT() << " - " << p_it->getMZ() << " " << p_it->getIntensity() << endl;
}
}
// updateRanges provides a fast way to update the ranges of all spectra and chromatograms in the experiment.
// Once updated, the data ranges for all dimensions (RT, m/z, int, IM) can be printed.
exp.updateRanges();
std::cout << "Data ranges:\n";
exp.spectrumRanges().printRange(std::cout);
std::cout << "\nGet maximum intensity on its own: " << exp.spectrumRanges().getMaxIntensity() << '\n';
std::cout << "Get minimum RT on its own: " << exp.spectrumRanges().getMinRT() << '\n';
std::cout << "Get maximum RT on its own: " << exp.spectrumRanges().getMaxRT() << '\n';
std::cout << "Get minimum m/z on its own: " << exp.spectrumRanges().getMinMZ() << '\n';
std::cout << "Get maximum m/z on its own: " << exp.spectrumRanges().getMaxMZ() << '\n';
// Printing the IM ranges is only possible if the spectra contain IM data (would throw an exception otherwise)
if (!exp.spectrumRanges().RangeMobility::isEmpty())
{
std::cout << "Get minimum IM on its own: " << exp.spectrumRanges().getMinMobility() << '\n';
std::cout << "Get maximum IM on its own: " << exp.spectrumRanges().getMaxMobility() << '\n';
}
// Store the spectra to a mzML file with:
FileHandler fh;
auto tmp_filename = File::getTemporaryFile();
fh.storeExperiment(tmp_filename, exp, {FileTypes::MZML});
// And load it with
fh.loadExperiment(tmp_filename, exp);
// If we wanted to load only the MS2 spectra we could speed up reading by setting:
fh.getOptions().setMSLevels({2});
// and then load from disk:
fh.loadExperiment(tmp_filename, exp);
// note: the file in 'tmp_filename' will be automatically deleted
return 0;
} // end of main
//! [doxygen_snippet_MSExperiment]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_Precursor.cpp | .cpp | 2,054 | 58 | // Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
//! [doxygen_snippet_Precursor]
#include <OpenMS/CONCEPT/Exception.h>
#include <OpenMS/FORMAT/FileHandler.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/METADATA/Precursor.h>
#include <OpenMS/openms_data_path.h> // exotic header for path to tutorial data
#include <iostream>
using namespace OpenMS;
using namespace std;
int main(int argc, const char** argv)
{
auto file_mzML = OPENMS_DOC_PATH + String("/code_examples/data/Tutorial_GaussFilter.mzML");
MSExperiment spectra;
// load mzML from code examples folder
FileHandler().loadExperiment(file_mzML, spectra);
// iterate over map and output MS2 precursor information
for (auto s_it = spectra.begin(); s_it != spectra.end(); ++s_it)
{
// we are only interested in MS2 spectra so we skip all other levels
if (s_it->getMSLevel() != 2) continue;
// get a reference to the precursor information
const MSSpectrum& spectrum = *s_it;
const vector<Precursor>& precursors = spectrum.getPrecursors();
// size check & throw exception if needed
if (precursors.empty()) throw Exception::InvalidSize(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, precursors.size(), "precursors vector must not be empty");
// get m/z and intensity of precursor
double precursor_mz = precursors[0].getMZ();
float precursor_int = precursors[0].getIntensity();
// retrieve the precursor spectrum (the most recent MS1 spectrum)
PeakMap::ConstIterator precursor_spectrum = spectra.getPrecursorSpectrum(s_it);
double precursor_rt = precursor_spectrum->getRT();
// output precursor information
std::cout << " precursor m/z: " << precursor_mz
<< " intensity: " << precursor_int
<< " retention time (sec.): " << precursor_rt
<< std::endl;
}
return 0;
} // end of main
//! [doxygen_snippet_Precursor]
| C++ |
3D | OpenMS/OpenMS | doc/code_examples/Tutorial_ResidueModification.cpp | .cpp | 1,636 | 46 | //! [doxygen_snippet_ResidueModification]
// Copyright (c) 2002-present, OpenMS Inc. -- EKU Tuebingen, ETH Zurich, and FU Berlin
// SPDX-License-Identifier: BSD-3-Clause
#include <OpenMS/CHEMISTRY/AASequence.h>
#include <OpenMS/CHEMISTRY/ResidueModification.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <iostream>
using namespace OpenMS;
using namespace std;
int main()
{
  // construct an AASequence object, query a residue
// and output some of its properties
AASequence aas = AASequence::fromString("DECIANGER");
cout << aas[2].getName() << " "
<< aas[2].getFormula().toString() << " "
<< aas[2].getModificationName() << " "
<< aas[2].getMonoWeight() << endl;
// find a modification in ModificationsDB
// and output some of its properties
// getInstance() returns a pointer to a ModsDB instance
const ResidueModification* mod = ModificationsDB::getInstance()->getModification("Carbamidomethyl (C)");
cout << mod->getOrigin() << " "
<< mod->getFullId() << " "
<< mod->getDiffMonoMass() << " "
<< mod->getMonoMass() << endl;
// set the modification on a residue of a peptide
// and output some of its properties (the formula and mass have changed)
// in this case ModificationsDB is used in the background
// to relate the name of the mod to its attributes
aas.setModification(2, "Carbamidomethyl (C)");
cout << aas[2].getName() << " "
<< aas[2].getFormula().toString() << " "
<< aas[2].getModificationName() << " "
<< aas[2].getMonoWeight() << endl;
return 0;
} //end of main
//! [doxygen_snippet_ResidueModification]
| C++ |