keyword
stringclasses
7 values
repo_name
stringlengths
8
98
file_path
stringlengths
4
244
file_extension
stringclasses
29 values
file_size
int64
0
84.1M
line_count
int64
0
1.6M
content
stringlengths
1
84.1M
language
stringclasses
14 values
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/similarity_searches_protocols/import_hhsuite.py
.py
26,712
642
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Module for parsing HH-suite (https://github.com/soedinglab/hh-suite) results files and to import their content in PyMod. """ import os import re import shutil import gzip import urllib.request from Bio import SeqIO from pymol.Qt import QtWidgets, QtCore from pymol import cmd from pymod_lib.pymod_externals.hhsuite.hh_reader import read_result from pymod_lib.pymod_externals.hhsuite.hhmakemodel import to_seq from pymod_lib.pymod_externals.hhsuite.hhmakemodel import main as hhmakemodel_main from pymod_lib.pymod_gui.shared_gui_components_qt import (small_font_style, highlight_color, PyMod_protocol_window_qt, PyMod_hbox_option_qt, askyesno_qt, askopenfile_qt) from pymod_lib.pymod_protocols.similarity_searches_protocols._base_blast import (Generic_BLAST_search, Similarity_searches_results_window_qt) from pymod_lib.pymod_threading import Protocol_exec_dialog from pymod_lib.pymod_exceptions import PyModInterruptedProtocol from pymod_lib.pymod_protocols.structural_databases_protocols import Associate_structure from pymod_lib.pymod_os_specific import check_network_connection ############################################################################### # Classes to import HH-suite results in PyMod. # ############################################################################### class Import_HHsuite_results(Generic_BLAST_search): def additional_initialization(self): pass def launch_from_gui(self, mode="hhr"): # Import template search results from HH-suite. if mode == "hhr": self.launch_from_gui_hhr() # Import a multiple sequence alignment from HH-suite. 
elif mode == "a3m": self.launch_from_gui_a3m() else: raise KeyError("Unknown 'mode': {}".format(mode)) ########################################################################### # Import results from a HHR file. # ########################################################################### import_error_message = "Import HH-suite Results Error" def launch_from_gui_hhr(self): """ Import template search results from HH-suite. """ # Select the file to open. self.hhr_filepath = askopenfile_qt("Select a HHR file to open", name_filter="*.hhr", parent=self.pymod.get_qt_parent()) if not self.hhr_filepath: return None # Parses the hhr results file. try: hhr_results = read_result(self.hhr_filepath) except Exception as e: self.pymod.main_window.show_error_message(self.import_error_message, ("The HHR results file appears to be invalid and the following error" " was raised: {}".format(str(e)))) return None # Check for empty results. if not hhr_results: self.pymod.main_window.show_warning_message(self.import_error_message, "Empty HHR file: this file does not contain any alignment.") return None # Store the results. self.hhr_results = hhr_results self.query_id = self.hhr_results[0].query_id if len(self.query_id) > 35: self.query_id = self.query_id[0:35] + "..." # Shows the results window. self.import_hhr_window = HHsuite_import_hhr_window_qt(parent=self.pymod.main_window, protocol=self) self.import_hhr_window.show() def get_subject_name(self, hsp, max_len=100): t = "{} ({})".format(hsp.template_id, hsp.template_info[1:]) if len(t) > max_len: t = t[0:max_len] + "..." return t.replace("\n", "") def get_hsp_evalue(self, hsp): return hsp.evalue def get_prob_value(self, hsp): return hsp.probability def get_hsp_query_seq(self, hsp): return to_seq(hsp.query_ali) def get_hsp_subj_seq(self, hsp): return to_seq(hsp.template_ali) def blast_results_state(self): """ Called when the "SUBMIT" button is pressed on the results window. """ # For each hsp takes the state of its check-box. 
self.my_blast_map = [chk.isChecked() for chk in self.import_hhr_window.sbjct_checkbuttons_list] self.import_hhr_window.destroy() # If the user selected at least one template. if any(self.my_blast_map): # This will actually import the sequences inside PyMod. self.import_results_in_pymod() def import_results_in_pymod(self): """ Builds a list containing those hits that were selected by the user in the BLAST results window. """ if not check_network_connection("https://google.com", timeout=3): has_network = False else: has_network = True #------------------------------------------ # Get the templates selected by the user. - #------------------------------------------ self.imported_hsp_list = [] for hsp_counter, hsp in enumerate(self.hhr_results): if self.my_blast_map[hsp_counter]: template_id = hsp.template_id hsp_dict = {"hsp": hsp, "pdb_code": None, "chain_id": None, "hsp_num": str(hsp_counter + 1), "successful": False} if re.match("([a-zA-Z0-9]{4})_([A-Za-z])$", template_id): pdb_code, chain_id = template_id.split("_") hsp_dict["pdb_code"] = pdb_code hsp_dict["chain_id"] = chain_id self.imported_hsp_list.append(hsp_dict) # Check if the CIF files of the hits can be fetched from the PDB. pdb_hsp_list = [h for h in self.imported_hsp_list if h["pdb_code"] is not None] if len(pdb_hsp_list) == len(self.imported_hsp_list): if has_network: message = ("Would you like to fetch from the Internet the 3D" " structures of the templates that you selected?") fetch_cif_files = askyesno_qt("Fetch 3D structures?", message, parent=self.pymod.get_qt_parent()) else: message = ("No network connection. 
The template 3D structures can" " not be fetched now from the Internet.") self.pymod.main_window.show_warning_message(self.import_error_message, message) fetch_cif_files = False elif len(pdb_hsp_list) == 0: fetch_cif_files = False else: if has_network: n_pdb_missing = len(self.imported_hsp_list)-len(pdb_hsp_list) message = ("You selected {} (out of {}) hit sequences which do not appear" " to be valid templates from the PDB. No 3D structure" " will be fetched now. You can fetch the PDB structures" " of each hit sequence having a valid PDB id later at any" " moment".format(n_pdb_missing, len(self.imported_hsp_list))) self.pymod.main_window.show_warning_message(self.import_error_message, message) fetch_cif_files = False #-------------------------------------- # Prepare the input and output files. - #-------------------------------------- # Prepare the templates input CIF directory. This will be needed by the # 'hhmakemodel.py' script. self.tem_in_dirpath = os.path.join(self.output_directory, "hhsuite_tem_input") if os.path.isdir(self.tem_in_dirpath): shutil.rmtree(self.tem_in_dirpath) os.mkdir(self.tem_in_dirpath) # Prepare the templates output CIF directory. This will be needed by the # 'hhmakemodel.py' script. self.tem_out_dirpath = os.path.join(self.output_directory, "hhsuite_tem_output") if os.path.isdir(self.tem_out_dirpath): shutil.rmtree(self.tem_out_dirpath) os.mkdir(self.tem_out_dirpath) # Set the path of the ouput PIR file generated by the 'hhmakemodel.py' script. self.pir_out_filepath = os.path.join(self.output_directory, "hhsuite_alignment.pir") #------------------------------------ # Actually downloads the CIF files. - #------------------------------------ if fetch_cif_files: try: if not self.pymod.use_protocol_threads: self.download_all_cif_files() else: label_text = ("Connecting to the PDB to download %s. Please wait for" " completion..." 
% self.get_seq_text(self.imported_hsp_list, "CIF file")) p_dialog = Protocol_exec_dialog(app=self.pymod.main_window, pymod=self.pymod, function=self.download_all_cif_files, args=(), title="Downloading CIF files", wait_start=0.15, wait_end=0.15, label_text=label_text) p_dialog.exec_() except PyModInterruptedProtocol: return None # Check if there were some structures which could not be fetched. n_failures = len([h for h in self.imported_hsp_list if not h["successful"]]) if n_failures != 0: title = "Download Error" message = ("Can not access the PDB database to download %s structures out of %s." " These templates will not be imported in PyMod." "\nPlease check your Internet connection or if the PDB ids of the" " structures are valid." % (n_failures, len(self.imported_hsp_list))) self.pymod.main_window.show_warning_message(title, message) if n_failures == len(self.imported_hsp_list): self.pymod.main_window.show_warning_message("Import HHsuite Results Error", "No templates could be fetched. Quitting.") return None self.selected_templates_nums = [h["hsp_num"] for h in self.imported_hsp_list if h["successful"]] else: self.selected_templates_nums = [h["hsp_num"] for h in self.imported_hsp_list] #------------------------------------ # Runs the 'hhmakemodel.py' script. - #------------------------------------ # Prepare the arguments for the hhsuite function. hhsuite_args = {"input": self.hhr_filepath, "cifs": self.tem_in_dirpath, "pir": self.pir_out_filepath, "output": self.tem_out_dirpath, "verbose": True, "m": self.selected_templates_nums, "e": None, "r": 0, # Do not use any filters. "c": True} # Run the hhsuite function. hhmakemodel_main(hhsuite_args) #---------------------------------------------------------- # Parse the output PIR file produced by the above script. 
- #---------------------------------------------------------- elements_to_load = [] ali_records = list(SeqIO.parse(self.pir_out_filepath, "pir")) for record_idx, record in enumerate(ali_records): if record_idx == 0: element_name = self.query_id else: if fetch_cif_files: element_name = "__temp_template_{}__".format(record_idx-1) else: element_name = self.imported_hsp_list[record_idx-1]["hsp"].template_id new_element = self.pymod.build_pymod_element_from_args(element_name, str(record.seq).replace("*", "-")) elements_to_load.append(new_element) self.pymod.add_element_to_pymod(new_element) # Add a new alignment object to PyMod in which contains the query sequence # and that of the selected templates. new_cluster = self.pymod.add_new_cluster_to_pymod(cluster_type="alignment", child_elements=elements_to_load, algorithm="imported") #----------------------------------------------- # Actually imports the final results in PyMod. - #----------------------------------------------- if fetch_cif_files: for record_idx, record in enumerate(ali_records): if record_idx == 0: continue chain = record.description.split(":")[3] modified_cif_filepath = os.path.join(self.tem_in_dirpath, "{}.cif".format(record.id)) modified_pdb_filepath = os.path.join(self.tem_in_dirpath, "{}.pdb".format(record.id)) tem_temp_name = "_hhsuite_template_{}".format(record_idx) cmd.load(modified_cif_filepath, tem_temp_name) cmd.save(modified_pdb_filepath, tem_temp_name) cmd.delete(tem_temp_name) try: a = Associate_structure(self.pymod, elements_to_load[record_idx]) a.associate(modified_pdb_filepath, chain) except Exception as e: title = "Associate Structure Failure" message = ("The structure association for %s chain %s failed because" " of the following error: %s" % (record.id, chain, str(e))) self.pymod.main_window.show_error_message(title, message) #------------- # Cleans up. 
- #------------- if os.path.isdir(self.tem_in_dirpath): shutil.rmtree(self.tem_in_dirpath) if os.path.isdir(self.tem_out_dirpath): shutil.rmtree(self.tem_out_dirpath) if os.path.isfile(self.pir_out_filepath): os.remove(self.pir_out_filepath) self.pymod.main_window.gridder(update_clusters=True, update_menus=True, update_elements=True) def download_all_cif_files(self): """ This method will call other methods to tetch the CIF files needed to import the HHsuite templates in PyMod. """ for hsp_dict in self.imported_hsp_list: self._fetch_single_element(hsp_dict) def _fetch_single_element(self, hsp_dict): """ Download the CIF file for a single element. """ pdb_code = hsp_dict["pdb_code"] output_name = "{}.cif".format(pdb_code) try: pdb_url = "https://files.rcsb.org/download/{}.cif.gz".format(pdb_code) temp_gzip_file_name = urllib.request.urlretrieve(pdb_url)[0] open_gzip_file = gzip.open(temp_gzip_file_name) # Uncompress the file while reading output_path = os.path.join(self.tem_in_dirpath, output_name) saved_file = open(output_path, 'wb') saved_file.write(open_gzip_file.read()) # Write pdb file open_gzip_file.close() saved_file.close() hsp_dict["successful"] = True except: pass ########################################################################### # Import a multiple sequence alignment from a A3M file built by HH-suite. # ########################################################################### def launch_from_gui_a3m(self): # Select the file to open. self.a3m_filepath = askopenfile_qt("Select a A3M file to open", name_filter="*.a3m *.oa3m", parent=self.pymod.get_qt_parent()) if not self.a3m_filepath: return None # Parses the a3m file. 
error_title = "Import HH-suite Results Error" try: a3m_content = [] records = SeqIO.parse(self.a3m_filepath, "fasta") for record in records: a3m_content.append((record.id, str(record.seq))) except Exception as e: self.pymod.main_window.show_error_message(self.import_error_message, ("The A3M file appears to be invalid and the following error" " was raised: {}".format(str(e)))) return None # Check for empty results. if not a3m_content: self.pymod.main_window.show_warning_message(self.import_error_message, "Empty A3M file: this file does not contain any aligned sequence.") return None # Store the results. self.a3m_content = a3m_content self.query_id = self.a3m_content[0][0] if len(self.query_id) > 35: self.query_id = self.query_id[0:35] + "..." # Shows the options window. self.import_a3m_window = HHsuite_import_a3m_window_qt(parent=self.pymod.main_window, protocol=self, title="A3M import options", upper_frame_title="Otions for importing an A3M file content", submit_command=self.a3m_import_state) self.import_a3m_window.show() def a3m_import_state(self): # Gets the number of sequences to import. try: n_seqs_to_import = int(self.import_a3m_window.seqs_to_import_spinbox.value()) except Exception as e: self.pymod.main_window.show_error_message("Input Error", "Non valid numeric input. Please correct.") return None # Convert the sequences in the A3M format in FASTA format. fasta_content = convert_items_a3m_to_fasta(self.a3m_content[0:n_seqs_to_import+1]) # Import the results in PyMod. 
elements_to_load = [] for item_idx, item in enumerate(fasta_content): new_element = self.pymod.build_pymod_element_from_args(item[0], item[1]) elements_to_load.append(new_element) self.pymod.add_element_to_pymod(new_element) new_cluster = self.pymod.add_new_cluster_to_pymod(cluster_type="alignment", child_elements=elements_to_load, algorithm="imported") self.import_a3m_window.destroy() self.pymod.main_window.gridder(update_clusters=True, update_menus=True, update_elements=True) def convert_items_a3m_to_fasta(a3m_items): """ Receives a 'items' list. Each element in the list is a two-element tuple containg the id of a sequence and its aligned sequence in the A3M format (see: https://github.com/soedinglab/hh-suite/wiki#generating-a-multiple-sequence-alignment-using-hhblits). Returns a similar list, in which the sequences are in the FASTA format. """ full_alignments = [list(item[1]) for item in a3m_items] for i, seq_i in enumerate(full_alignments): for pos_idx, pos in enumerate(seq_i): if pos.islower(): for j, seq_j in enumerate(full_alignments): if i != j: seq_j.insert(pos_idx, "-") fasta_items = [] for item_idx, a3m_item in enumerate(a3m_items): fasta_item = (a3m_item[0], "".join(full_alignments[item_idx]).upper()) fasta_items.append(fasta_item) return fasta_items ############################################################################### # GUI. # ############################################################################### class HHsuite_import_hhr_window_qt(Similarity_searches_results_window_qt): """ Window for showing similarity searches results. 
""" def _get_window_title(self): return "Import HH-suite Results" def _get_upper_frame_title(self): title_text = ("HHsearch Output for: %s\nFound %s sequences\nPlease Select the Sequences to" " Import" % (self.protocol.query_id, len(self.protocol.hhr_results))) return title_text def display_blast_hits(self): """ This is used to display in the BLAST results window information for each hit and a checkbutton to select it for importing it inside PyMod. """ # Shows the headers of the columns. headers_font_style = "%s; color: %s; font-weight: bold" % (small_font_style, highlight_color) self.blast_seq_label = QtWidgets.QLabel("Name") self.blast_seq_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.blast_seq_label, 0, 0) self.blast_e_val_label = QtWidgets.QLabel("E-Value") self.blast_e_val_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.blast_e_val_label, 0, 1) self.prob_val_label = QtWidgets.QLabel("Probability") self.prob_val_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.prob_val_label, 0, 2) self.blast_iden_label = QtWidgets.QLabel("Identity") self.blast_iden_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.blast_iden_label, 0, 3) self.query_span_label = QtWidgets.QLabel("Query span") self.query_span_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.query_span_label, 0, 4) self.subject_span_label = QtWidgets.QLabel("Template span") self.subject_span_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.subject_span_label, 0, 5) # Displays in the results window the hsps found in the output file of the # similarity search program. self.blast_output_row = 1 self.sbjct_checkbuttons_list = [] # List containing the checkbutton widgets. for hsp in self.protocol.hhr_results: # Hit name and checkbox. 
hsp_checkbox = QtWidgets.QCheckBox(self.protocol.get_subject_name(hsp)) hsp_checkbox.setStyleSheet(small_font_style) self.sbjct_checkbuttons_list.append(hsp_checkbox) self.results_grid.addWidget(hsp_checkbox, self.blast_output_row, 0) # E-value info. evalue_label = QtWidgets.QLabel("%.2e" % (self.protocol.get_hsp_evalue(hsp))) evalue_label.setStyleSheet(small_font_style) self.results_grid.addWidget(evalue_label, self.blast_output_row, 1) # Probability. probability_label = QtWidgets.QLabel(str(self.protocol.get_prob_value(hsp))) probability_label.setStyleSheet(small_font_style) self.results_grid.addWidget(probability_label, self.blast_output_row, 2) # Get alignment stats. matches_count, identities_count = self.protocol.get_hsp_matches(hsp) seqid = identities_count/matches_count # Sequence identity. identities_label = QtWidgets.QLabel("{}/{} ({:.1f}%)".format(identities_count, matches_count, seqid*100)) identities_label.setStyleSheet(small_font_style) self.results_grid.addWidget(identities_label, self.blast_output_row, 3) # Query span info. query_span_val = (hsp.end[0]-hsp.start[0])/hsp.query_length query_span_info_text = "{}-{} ({:.1f}%)".format(hsp.start[0], hsp.end[0], query_span_val*100) span_info_label = QtWidgets.QLabel(query_span_info_text) span_info_label.setStyleSheet(small_font_style) self.results_grid.addWidget(span_info_label, self.blast_output_row, 4) # Subject span info. tem_span_val = (hsp.end[1]-hsp.start[1])/hsp.template_length tem_span_info_text = "{}-{} ({:.1f}%)".format(hsp.start[1], hsp.end[1], tem_span_val*100) hspan_info_label = QtWidgets.QLabel(tem_span_info_text) hspan_info_label.setStyleSheet(small_font_style) self.results_grid.addWidget(hspan_info_label, self.blast_output_row, 5) self.blast_output_row += 1 class HHsuite_import_a3m_window_qt(PyMod_protocol_window_qt): """ Window with options for importing multiple sequence alignments from a3m files. 
""" default_offset = 10 def build_protocol_middle_frame(self): n_tot_sequences = len(self.protocol.a3m_content) - 1 # Sub-frame created to select the number of sequences to import. range_label = "Import the first n. sequences (tot: {})".format(n_tot_sequences) self.range_subframe = PyMod_hbox_option_qt(label_text=range_label) # Spinbox to select the number of sequences to import. self.seqs_to_import_spinbox = QtWidgets.QSpinBox() self.seqs_to_import_spinbox.setRange(1, n_tot_sequences) self.seqs_to_import_spinbox.setValue(min((100, n_tot_sequences))) self.range_subframe.hbox.addWidget(self.seqs_to_import_spinbox) self.range_subframe.set_auto_input_widget_width() self.middle_formlayout.add_widget_to_align(self.range_subframe)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/similarity_searches_protocols/hmmsearch.py
.py
7,053
176
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Module for performing hmmsearch (profile-vs-sequence) searches in PyMod. It mainly builds up on the code used to perform BLAST searches and phmmer searches. """ import os from Bio import SeqIO from pymod_lib.pymod_protocols.similarity_searches_protocols.phmmer import PHMMER_search, Phmmer_options_window_qt from pymod_lib.pymod_os_specific import get_exe_file_name from pymod_lib.pymod_seq.seq_manipulation import remove_gap_only_columns ################################################################################################ # HMMSEARCH. # ################################################################################################ class Hmmsearch_search(PHMMER_search): blast_version = "hmmsearch" protocol_name = blast_version exe_filename = None all_exe_filenames = ["hmmbuild", "hmmsearch"] def launch_from_gui(self): # Check if a correct selection is provided. selected_clusters_list = self.pymod.get_selected_clusters() if len(selected_clusters_list) == 0: title = "Selection Error" message = "Please select an entire alignment object to perform a %s search" % (self.blast_version_full_name) self.pymod.main_window.show_error_message(title, message) return None if len(selected_clusters_list) > 1: title = "Selection Error" message = "Please select only one alignment object to perform a %s search" % (self.blast_version_full_name) self.pymod.main_window.show_error_message(title, message) return None if not self.check_blast_program(): return None # Gets the selected sequence. The main index will be used later to build the cluster. self.blast_query_element = selected_clusters_list[0] # Opens the options window. 
self.build_blast_window() def get_blast_window_class_qt(self): return Hmmsearch_options_window_qt def execute_hmmer_program(self, query_filepath, out_filepath, db_filepath, exe_filepath, hmmbuild_exe_filepath, report_evalue=10e-6): """ Execute the locally installed hmmbuild and hmmsearch. """ # Launch hmmbuild to buil a profile HMM from the alignment file saved from # PyMod. hmm_filepath = query_filepath + ".hmm" command_ls = [hmmbuild_exe_filepath, hmm_filepath, query_filepath] self.pymod.new_execute_subprocess(command_ls) # Launche hmmsearch. self.hmmscan_ali_filepath = out_filepath + ".sth" command_ls = [exe_filepath, "-o", out_filepath, "-A", self.hmmscan_ali_filepath, "--domE", str(report_evalue), hmm_filepath, db_filepath] self.pymod.new_execute_subprocess(command_ls) def import_results_in_pymod(self): """ Builds a cluster with the hit sequences. """ # The list of elements whose sequences will be updated according to the star alignment. hsp_elements = [] use_hmmer_pdb = self.db_filename.startswith("pdbaa") #------------------------------------------------------------ # Builds a new cluster with the query and all the new hits. - #------------------------------------------------------------ query_original_index = self.pymod.get_pymod_element_index_in_container(self.blast_query_element) # Creates PyMod elements for all the imported hits and add them to the cluster. for h in self.hsp_imported_from_blast: cs = self.pymod.build_pymod_element_from_hsp(self.get_hsp_subj_seq(h["hsp"]), self.get_hsp_element_title(h, use_hmmer_pdb)) self.pymod.add_element_to_pymod(cs) hsp_elements.append(cs) # Builds the "BLAST search" cluster element. new_blast_cluster = self.pymod.add_new_cluster_to_pymod( cluster_type="profile-cluster", child_elements=hsp_elements, algorithm=self.blast_version_full_name, update_stars=False) # Move the new cluster to the same position of the original query element in PyMod main # window. 
self.pymod.change_pymod_element_list_index(new_blast_cluster, query_original_index) #--------------------------------- # Updates the aligned sequences. - #--------------------------------- if os.path.isfile(self.hmmscan_ali_filepath): # For each sequence imported in PyMod, searches in the output alignment # of hmmsearch for a sequence with the same amino acids in order to # import in PyMod the alignment generated by hmmsearch. hsp_recs = SeqIO.parse(self.hmmscan_ali_filepath, "stockholm") # Stores the gapless sequences of the hits imported in PyMod. hsp_seqs = [str(e.my_sequence).replace("-", "") for e in hsp_elements] # Iter through the msa sequences (which contain also the hits not # imported in PyMod). for h in hsp_recs: hsp_msa_aliseq = str(h.seq).upper() hsp_msa_seq = hsp_msa_aliseq.replace("-", "") # Search a matching sequence in the hits imported in PyMod. for hsp_idx, (hsp_seq, hsp_element) in enumerate(zip(hsp_seqs, hsp_elements)): if hsp_seq == hsp_msa_seq: # Updates the sequence. hsp_element.my_sequence = hsp_msa_aliseq hsp_seqs.pop(hsp_idx) hsp_elements.pop(hsp_idx) break if not hsp_elements: break remove_gap_only_columns(new_blast_cluster) #------------ # Finishes. - #------------ # Cleans and update the PyMod main window. self.finish_blast_search() self.pymod.main_window.gridder(clear_selection=True, update_clusters=True, update_menus=True, update_elements=True) ################################################################################################### # GUI. # ################################################################################################### class Hmmsearch_options_window_qt(Phmmer_options_window_qt): """ Window for HMMSEARCH searches. """ pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/updater_protocols/__init__.py
.py
7,840
184
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Module for updating the BLAST and HMMER databases in PyMod by downloading them from the NCBI and EBI servers. """ import os import subprocess import json import time from pymod_lib.pymod_os_specific import get_exe_file_name, get_formatted_date, check_network_connection from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_vars import data_installer_log_filename from .updater_internal import all_components_list, Pfam_db_installer, BLAST_db_installer from .updater_gui import InstallerUpdaterWindow class UpdaterProtocol(PyMod_protocol): protocol_name = "database_updater" def __init__(self, pymod): PyMod_protocol.__init__(self, pymod) # selected components to be updated/installed self.selected_comps = [] self.pending_downloads = 0 self.pymod_data_dirpath = os.path.join(self.pymod.current_pymod_dirpath, self.pymod.data_dirname) # Path of the file where the download dates will be stored. self.download_log_filepath = os.path.join(self.pymod_data_dirpath, data_installer_log_filename) def launch_from_gui(self): # If there is no internet connection, everything is blocked. if not check_network_connection('https://www.google.it/'): self.pymod.main_window.show_error_message("Connection Error", "No internet connection. Can not update PyMod databases.") return None # Associates the correct installer to each component. self.components_dict_installer = {} self.components_list = [] # Builds Qthread objects. _all_components_list = all_components_list[:] if self.pymod.use_blast_v5_databases: _all_components_list.pop(0) # Remove BLAST v4 databases. else: _all_components_list.pop(1) # Remove BLAST v5 databases. 
for comp, installer_class in zip(_all_components_list, [BLAST_db_installer, Pfam_db_installer, BLAST_db_installer]): component_qthread = installer_class(component=comp) self.components_dict_installer[comp.name] = component_qthread if comp.name == 'hmmscan_databases': comp.installer.hmmpress_exe_filepath = os.path.join(self.pymod.hmmer_tool["exe_dir_path"].get_value(), get_exe_file_name("hmmpress")) self.components_list.append(comp) comp.reinitialize() # Showing GUI. self.window = InstallerUpdaterWindow(parent=self.pymod.main_window, installer_protocol=self) #---------------- # Installation. - #---------------- def install_selected(self): """Called from the GUI slot that responds to the Install Selected button""" # Builds the data directory. if not os.path.isdir(self.pymod_data_dirpath): os.mkdir(self.pymod_data_dirpath) # Builds a temporary directory in PyMod configuration folder in /home/user # where to unzip the external tools and data components installation directories. # The files will later be moved in PyMod directory. self.build_temp_directory_in_cfg_dir() # Prepares to download the components in this temporary directory. # for inst in self.components_dict_installer.values(): # inst.download_destination_dirpath = self.temp_files_cfg_directory_path for comp in self.selected_comps: comp.installer.download_destination_dirpath = self.temp_files_cfg_directory_path self.pending_downloads = len(self.selected_comps) # Starts the installer QThread for every selected component in the GUI. for comp in self.selected_comps: comp.installer.install_mode = "download" time.sleep(0.1) comp.installer.start() self.window.activate_progressbar() def finish_installation(self): """ Updates the download log and changes the GUI. 
""" last_downloaded_dict = {} installed_count = 0 for comp in self.selected_comps: try: print(comp) for element in comp.installer.downloaded_filepath_list: print(element) comp.installer.unzip_downloaded_files(comp.target_installation_path, in_thread=False) except: continue if comp.installer.installation_status == "success": # Set the last downloaded time. last_downloaded_dict[comp.name] = comp.last_downloaded installed_count += 1 # Writes the downloads log. if last_downloaded_dict: # Get the values from the old log, if present. if os.path.isfile(self.download_log_filepath): old_downloaded_dict = {} with open(self.download_log_filepath, "r") as l_fh: old_downloaded_dict = json.loads(l_fh.read()) # Updates with new values. for k in old_downloaded_dict: if not k in last_downloaded_dict: last_downloaded_dict[k] = old_downloaded_dict[k] # Actually writes the log. with open(self.download_log_filepath, "w") as l_fh: l_fh.write(json.dumps(last_downloaded_dict)) # Close the database updater GUI. message = ("Database update terminated successfully for %s of %s" " components." % (installed_count, len(self.selected_comps))) self.window.info_slot(message, None) # self.window.close() def build_temp_directory_in_cfg_dir(self): self.temp_files_cfg_directory_path = os.path.join(self.pymod.cfg_directory_path, self.pymod.pymod_temp_directory_name) # self.temp_files_cfg_directory_path = self.pymod.temp_directory_dirpath if not os.path.exists(self.temp_files_cfg_directory_path): os.mkdir(self.temp_files_cfg_directory_path) def set_component_target_path(self, component): """ Set installation paths inside the 'data' directory inside the PyMod directory. It will produce something like: /home/user/pymod/data/blast_databases. 
""" if component.name == 'blast_databases': path_list = [self._get_database_dirpath("blast", "database_dir_path")] elif component.name == 'hmmer_databases': path_list = [self._get_database_dirpath("hmmer", "database_dir_path")] elif component.name == "hmmscan_databases": path_list = [self._get_database_dirpath("hmmer", "hmmscan_db_dir_path")] else: raise KeyError("Unknown component: %s" % (component.name)) try: component.target_installation_path = os.path.join(*path_list) except TypeError: # Some path was undefined. component.target_installation_path = None def _get_database_dirpath(self, tool_name, param_name): if tool_name == "blast": param_value = self.pymod.blast_plus[param_name].get_value() elif tool_name == "hmmer": param_value = self.pymod.hmmer_tool[param_name].get_value() else: raise KeyError("Unknown tool: %s" % (tool_name)) if param_value.replace(" ", "") == "": return None else: return param_value
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/updater_protocols/updater_gui.py
.py
18,386
368
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import traceback from socket import gaierror import json from pymol.Qt import QtWidgets, QtCore, QtGui class Ui_UpdateDialog: n_cols = 5 # Number of columns in the updater window frame. component_col_idx = 0 databases_col_idx = 1 status_col_idx = 2 source_col_idx = 3 last_download_col_idx = 4 def setupUi(self, UpdateDialog): UpdateDialog.setObjectName("UpdateDialog") UpdateDialog.resize(950, 470) self.verticalLayout = QtWidgets.QVBoxLayout(UpdateDialog) self.verticalLayout.setObjectName("verticalLayout") self.select_comp_label = QtWidgets.QLabel(UpdateDialog) self.select_comp_label.setObjectName("select_comp_label") self.verticalLayout.addWidget(self.select_comp_label) self.components_tableWidget = QtWidgets.QTableWidget(UpdateDialog) default_font = QtGui.QFont() default_font.setPointSize(default_font.pointSize()-1) self.components_tableWidget.setFont(default_font) self.components_tableWidget.setProperty("showDropIndicator", False) self.components_tableWidget.setDragDropOverwriteMode(False) self.components_tableWidget.setAlternatingRowColors(True) self.components_tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.components_tableWidget.setGridStyle(QtCore.Qt.NoPen) self.components_tableWidget.setColumnCount(self.n_cols) self.components_tableWidget.setObjectName("components_tableWidget") self.components_tableWidget.setRowCount(1) vertical_header_item = QtWidgets.QTableWidgetItem() self.components_tableWidget.setVerticalHeaderItem(0, vertical_header_item) for i in range(self.n_cols): item = QtWidgets.QTableWidgetItem() self.components_tableWidget.setHorizontalHeaderItem(i, item) item = QtWidgets.QTableWidgetItem() 
self.components_tableWidget.setItem(0, self.component_col_idx, item) item = QtWidgets.QTableWidgetItem() self.components_tableWidget.setItem(0, self.databases_col_idx, item) self.components_tableWidget.horizontalHeader().setVisible(True) self.components_tableWidget.horizontalHeader().setCascadingSectionResizes(False) self.components_tableWidget.setColumnWidth(self.component_col_idx, 210) self.components_tableWidget.setColumnWidth(self.databases_col_idx, 180) self.components_tableWidget.setColumnWidth(self.status_col_idx, 190) # self.components_tableWidget.setColumnWidth(self.source_col_idx, 390) # self.components_tableWidget.setColumnWidth(self.last_download_col_idx, 250) self.components_tableWidget.horizontalHeader().setStretchLastSection(True) self.components_tableWidget.verticalHeader().setVisible(False) self.components_tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.verticalLayout.addWidget(self.components_tableWidget) self.statusLabel = QtWidgets.QLabel(UpdateDialog) self.statusLabel.setObjectName("statusLabel") self.verticalLayout.addWidget(self.statusLabel) self.installation_progressBar = QtWidgets.QProgressBar(UpdateDialog) self.installation_progressBar.setEnabled(True) self.installation_progressBar.setProperty("value", 10) self.installation_progressBar.setObjectName("installation_progressBar") self.verticalLayout.addWidget(self.installation_progressBar) self.buttonsHorizontalLayout = QtWidgets.QHBoxLayout() self.buttonsHorizontalLayout.setObjectName("buttonsHorizontalLayout") self.installSel_button = QtWidgets.QPushButton(UpdateDialog) self.installSel_button.setObjectName("installSel_button") self.buttonsHorizontalLayout.addWidget(self.installSel_button) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.buttonsHorizontalLayout.addItem(spacerItem) self.cancel_button = QtWidgets.QPushButton(UpdateDialog) self.cancel_button.setObjectName("cancel_button") 
self.buttonsHorizontalLayout.addWidget(self.cancel_button) self.verticalLayout.addLayout(self.buttonsHorizontalLayout) UpdateDialog.setWindowTitle("Install and update databases") self.select_comp_label.setText("Select components to install or update") self.components_tableWidget.setSortingEnabled(True) self.components_tableWidget.horizontalHeaderItem(self.component_col_idx).setText("Component") self.components_tableWidget.horizontalHeaderItem(self.databases_col_idx).setText("Databases Names") self.components_tableWidget.horizontalHeaderItem(self.status_col_idx).setText("Status") self.components_tableWidget.horizontalHeaderItem(self.source_col_idx).setText("Source") self.components_tableWidget.horizontalHeaderItem(self.last_download_col_idx).setText("Last Downloaded") __sortingEnabled = self.components_tableWidget.isSortingEnabled() self.components_tableWidget.setSortingEnabled(False) self.components_tableWidget.setSortingEnabled(__sortingEnabled) self.statusLabel.setText("") self.cancel_button.setText("Cancel") self.installSel_button.setText("Install Selected") QtCore.QMetaObject.connectSlotsByName(UpdateDialog) class InstallerUpdaterWindow(QtWidgets.QDialog, Ui_UpdateDialog): is_pymod_window = True def __init__(self, parent=None, installer_protocol=None): super(InstallerUpdaterWindow, self).__init__(parent) self.setupUi(self) self.installer_protocol = installer_protocol self._bind_thread_objects() # sets the thread objects as children of this window self.view = self.components_tableWidget self.items_status_dict = {} self.items_last_download_dict = {} #puts the 'Install selected' button as default # self.installSel_button.setFocus() # shows the progress bar self.installation_progressBar.setValue(0) self.installation_progressBar.setVisible(True) # connections self._connections() # shows in the ListView the list of PyMod components self._fill_list() # shows the window itself self.show() # ping to server self.start_internet_check() def _connections(self): 
"""Connecting signals with slots.""" self.cancel_button.clicked.connect(self.on_cancel_button_click) self.installSel_button.clicked.connect(self.install_selected_slot) for comp in self.installer_protocol.components_list: # Used to show the size of the download before starting the donwload. comp.installer.retrieved_size.connect(self.set_starting_status_slot) # Updates the text in the "Status" column. comp.installer.set_update_status.connect(self.set_update_status_slot) # Terminates the installation. comp.installer.terminated_installation.connect(self.terminated_installation_slot) # Called when the installation of a component fails. comp.installer.critical_error.connect(self.critical_error_slot) # Shows a information messagebox. comp.installer.info_message.connect(self.info_slot) def on_cancel_button_click(self): self._terminate_threads() self.close() def _terminate_threads(self): for comp in self.installer_protocol.selected_comps: if comp.installer.isRunning(): comp.installer.installation_status = "stopped" def closeEvent(self, evnt): self._terminate_threads() def show_message_in_label(self, message): self.statusLabel.setText(message) def activate_progressbar(self): self.installation_progressBar.setMinimum(0) self.installation_progressBar.setMaximum(0) # self.show_message_in_label("Updating %s components" % self.installer_protocol.pending_downloads) def terminated_installation_slot(self, component): # Updates the "Last Downloaded" column. 
self.items_last_download_dict[component.name].setText(component.last_downloaded) self.updated_gui_on_thread_finish("installation_terminated") def critical_error_slot(self, error_message, comp): """Aborts installation if an error occurred""" comp.installer.installation_status = "failed" comp.installer.exit(1) self.set_update_status_slot(comp, "Updated Failed: %s" % error_message, "red") self.updated_gui_on_thread_finish("failure") def updated_gui_on_thread_finish(self, status): if not status in ("download_terminated", "installation_terminated", "failure"): raise KeyError(status) # downloaded_comps = [comp for comp in self.installer_protocol.selected_comps if comp.installer.installation_status == "downloaded"] failed_comps = [comp for comp in self.installer_protocol.selected_comps if comp.installer.installation_status == "failed"] installed_comps = [comp for comp in self.installer_protocol.selected_comps if comp.installer.installation_status == "success"] running_comps = [comp for comp in self.installer_protocol.selected_comps if comp.installer.installation_status != "failed"] if status == "failure": if len(failed_comps) == len(self.installer_protocol.selected_comps): # self.show_message_in_label("Update failed.") self.installation_progressBar.setMaximum(100) self.installation_progressBar.setValue(0) elif status == "installation_terminated": # All the components have been installed. if len(installed_comps) == len(running_comps): # self.show_message_in_label("Update completed.") self.installation_progressBar.setMaximum(100) self.installation_progressBar.setValue(100) self.installer_protocol.finish_installation() def info_slot(self, message, comp): """shows an info message, does not abort the installation""" self.installer_protocol.pymod.main_window.show_info_message("Warning", message) def set_starting_status_slot(self, component, size_download): """It responds to the established_connection signal emitted by the thread. 
The signal carries the file size, that will be displayed in the window.""" # gets the graphic object that displays the status, from a dictionary previously created status_item_object = self.items_status_dict[component.name]# [self.status_col_idx] status_item_object.setText('Available: '+str(int(size_download/1048576))+' MB') status_item_object.setForeground(QtGui.QBrush(QtGui.QColor(204, 255, 204))) def set_update_status_slot(self, component, text="new status", color="light_green"): status_item_object = self.items_status_dict[component.name] status_item_object.setText(text) if color == "light_green": status_item_object.setForeground(QtGui.QBrush(QtGui.QColor(204, 255, 204))) elif color == "green": status_item_object.setForeground(QtGui.QBrush(QtGui.QColor(10, 255, 10))) elif color == "gray": status_item_object.setForeground(QtGui.QBrush(QtGui.QColor(200, 200, 200))) elif color == "red": status_item_object.setForeground(QtGui.QBrush(QtGui.QColor(255, 0, 0))) else: raise KeyError(color) def install_selected_slot(self): """This is the action executed after the 'Install Selected' button is pressed. It calls the 'install_selected' method of the Updater Protocol class.""" # Check if there are any selected databases. self.installer_protocol.selected_comps = [] for comp in self.items_dict.keys(): # Check the status of each checkbutton and get the ones having been selected by the user. if self.items_dict[comp].checkState() == QtCore.Qt.Checked: self.installer_protocol.selected_comps.append(comp) if not self.installer_protocol.selected_comps: self.info_slot("Please select at least one database to download.", None) return None # Check if the selected databases can be downloaded. for cmp in self.installer_protocol.selected_comps: # Check if the server has been reached while pinging before. if not cmp.can_be_downloaded: error_mess = "The %s database can not be currently downloaded. Please uncheck it." 
% (cmp.full_name) self.info_slot(error_mess, None) return None # Trying to figure out if there are hmmer executables. If not, blocks the installation, # because it can't index the databases without hmmpress. if cmp.name == 'hmmscan_databases': if not os.path.isfile(cmp.installer.hmmpress_exe_filepath): error_mess = ("Cannot install PFAM databases since a 'hmmscan' executable was" " not found in the HMMER executables directory specified in the" " PyMod options window ('%s'). In order to update the PFAM database," " please provide an 'hmmscan' executable, it is necessary for the" " indicization of the compressed database" % os.path.dirname(cmp.installer.hmmpress_exe_filepath)) self.info_slot(error_mess, None) return None # Sets the target path for the components. self.installer_protocol.set_component_target_path(cmp) # Checks the target path. if cmp.target_installation_path == None: error_mess = ("Cannot install %s databases, because its databases directory is not" " defined in the PyMod options. Please uncheck it or define it in the" " PyMod options window." % (cmp.full_name)) self.info_slot(error_mess, None) return None if not os.path.isdir(cmp.target_installation_path): error_mess = ("Cannot install %s databases, because its databases directory defined in" " the PyMod options (%s) does not exists. Please uncheck it or define an" " existing directory in the PyMod options window." % (cmp.full_name, cmp.target_installation_path)) self.info_slot(error_mess, None) return None self.installSel_button.setEnabled(False) # Freezing the button. 
self.installer_protocol.install_selected() def _bind_thread_objects(self): """this method sets the window as the Thread parent, in order to work on the threads as GUI children """ for c in self.installer_protocol.components_list: c.installer.setParent(self) def start_internet_check(self): """Ping method that also retrieves the file size""" for comp in self.installer_protocol.components_list: # this flag tells the installer not to download everything but only to ping the server comp.installer.install_mode = "ping" comp.installer.start() def _fill_list(self): """ Create the list's data. """ self.items_dict = {} # Checks the database log in order to obtain the date when each database was last downloaded. download_log_dict = {} if os.path.isfile(self.installer_protocol.download_log_filepath): with open(self.installer_protocol.download_log_filepath, "r") as l_fh: download_log_dict = json.loads(l_fh.read()) # Configure the list of items is the 'all_components_list' from the PyMod Installer class. self.view.setRowCount(len(self.installer_protocol.components_list)) for row_counter, component in enumerate(self.installer_protocol.components_list): # Create an item and set the component name as text. item = QtWidgets.QTableWidgetItem(component.full_name) # add a checkbox to the name item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled) item.setCheckState(QtCore.Qt.Unchecked) # place the items in columns self.view.setItem(row_counter, self.component_col_idx, item) self.items_dict.update({component: item}) # Set the names of the databases. self.view.setItem(row_counter, self.databases_col_idx, QtWidgets.QTableWidgetItem(component.databases_string)) # Create another item displaying the status. graphic_status = 'Wait...' status = QtWidgets.QTableWidgetItem(graphic_status) status.setForeground(QtGui.QBrush(QtGui.QColor(191, 191, 191))) self.view.setItem(row_counter, self.status_col_idx, status) # Set the source URL. 
self.view.setItem(row_counter, self.source_col_idx, QtWidgets.QTableWidgetItem(component.remote_source)) # Fill in the last downloaded column. if component.name in download_log_dict: last_downloaded_str = download_log_dict[component.name] else: last_downloaded_str = "Never" last_downloaded_item = QtWidgets.QTableWidgetItem(last_downloaded_str) self.view.setItem(row_counter, self.last_download_col_idx, last_downloaded_item) self.items_status_dict[component.name] = status self.items_last_download_dict[component.name] = last_downloaded_item def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Escape: pass else: QtWidgets.QDialog.keyPressEvent(self, event)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/updater_protocols/updater_internal.py
.py
23,431
530
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import gzip import shutil import subprocess import time import traceback from ftplib import FTP from pymol.Qt import QtCore from pymod_lib.pymod_vars import blast_databases_dirname, hmmer_databases_dirname, hmmscan_databases_dirname from pymod_lib.pymod_os_specific import get_formatted_date ################################################################################################## # Supporting classes and functions. # ################################################################################################## class PyMod_component: def __init__(self, name, full_name, subdir_name=None, remote_source=None, databases_string=None): self.name = name self.full_name = full_name self.target_installation_path = None self.remote_source = remote_source self.installer = None self.subdir_name = subdir_name self.can_be_downloaded = False self.databases_string = databases_string self.last_downloaded = None def reinitialize(self): self.target_installation_path = None self.can_be_downloaded = False self.last_downloaded = None def gunzip_shutil(source_filepath, dest_dirpath, block_size=65536): """ Support for gzipped files. 
""" dest_filename = os.path.basename(source_filepath).replace('.gz', '') dest_filepath = os.path.join(dest_dirpath, dest_filename) with gzip.open(source_filepath, 'rb') as s_file, open(dest_filepath, 'wb') as d_file: shutil.copyfileobj(s_file, d_file, block_size) def is_compressed(file_name): return file_name.endswith(compressed_files_extensions) ################################################################################################## # DEFAULT VARIABLES # ################################################################################################## # Installer files information. compressed_files_extensions = ('.tar.gz', '.gz', '.tgz', '.zip', '.tar', '.tar.xz', '.txz', '.tar.bz2', '.tbz2') # Lists of PyMod databases that can be installed through the PyMod installer GUI. all_components_list = [ # BLAST+ v4. # Old BLAST+ versions (< 2.8.0) only recognize "version 4" databases # (found in the "v4" directory of the BLAST ftp site). The default directory, # contains "version 5" databases, which are only compatible with newer BLAST+ # versions. PyMod_component(name=blast_databases_dirname, full_name="BLAST+ v4 databases", remote_source="ftp://ftp.ncbi.nlm.nih.gov/blast/db/v4/", subdir_name=["swissprot_v4", "pdbaa_v4"], databases_string="swissprot, pdbaa"), # BLAST+ v5. PyMod_component(name=blast_databases_dirname, full_name="BLAST+ databases", remote_source="ftp://ftp.ncbi.nlm.nih.gov/blast/db/", subdir_name=["swissprot", "pdbaa"], databases_string="swissprot, pdbaa"), # PFAM. PyMod_component(name=hmmscan_databases_dirname, full_name="HMMSCAN database", remote_source="ftp://ftp.ebi.ac.uk/pub/databases/Pfam/current_release/", databases_string="PFAM"), # FASTA files for HMMER programs. 
PyMod_component(name=hmmer_databases_dirname, full_name="HMMER databases", remote_source="ftp://ftp.ncbi.nlm.nih.gov/blast/db/FASTA/", subdir_name=["swissprot", "pdbaa"], databases_string="swissprot, pdbaa") ] ################################################################################################### # PYMOD COMPONENT INSTALLER CLASSES # ################################################################################################### class InstallerQThread(QtCore.QThread): """ A QThread wrapper for the installers. Contains methods that make sense only when this class is extended together with a PyMod_component_installer (or sub) class, creating a subclass that inherits both from this class and PyMod_component_installer. """ critical_error = QtCore.pyqtSignal(str, PyMod_component) info_message = QtCore.pyqtSignal(str, PyMod_component) established_connection = QtCore.pyqtSignal(PyMod_component) retrieved_size = QtCore.pyqtSignal(PyMod_component, int) set_update_status = QtCore.pyqtSignal(PyMod_component, str, str) terminated_installation = QtCore.pyqtSignal(PyMod_component) freeze_thread = QtCore.pyqtSignal(PyMod_component, int) def run(self): """this method is never called alone (see QThread documentation) but is executed when the Qthread.start() method is called if the flag 'install_mode' is True, the thread will install the databases, if not, it will only retrieve the file size from the server.""" # Only pings the remote source. if self.install_mode == "ping": # Returns 'False' if there is a connection error. time.sleep(0.5) connection = self.ping() if connection: self.component.can_be_downloaded = True else: try: # Actually performs the installation. self.download_and_install() except TerminateQThread as e: pass # emette un type error, oltre al gaierror, se non c'e' connessione internet. # Emette anche un EOFError e un TimeoutError se non fa in tempo a scaricare. except (gaierror, TypeError) as e: self.critical_error.emit("Cannot connect to server. 
Please check Internet connection.", self.component) except EOFError as e: self.critical_error.emit("Timeout expired for connection. Try again later.", self.component) except Exception as e: msg = str(e) self.critical_error.emit(msg, self.component) traceback.print_exc() ########################################################## # Wrapper for the signals. Used in installer subclasses. # ########################################################## def emit_signal(self, signal_name, *args, **kwargs): # getattr(self, signal_name).emit(*args, **kwargs) if signal_name == "set_update_status": self.set_update_status.emit(*args, **kwargs) elif signal_name == "retrieved_size": self.retrieved_size.emit(*args, **kwargs) elif signal_name == "critical_error": self.critical_error.emit(*args, **kwargs) elif signal_name == "terminated_installation": self.terminated_installation.emit(*args, **kwargs) else: raise KeyError("Unknown 'signal_name': %s" % signal_name) class PyMod_component_installer(InstallerQThread): """The installer class must be extended together with the InstallerQThread class. Use the 'build_installer_qthread' function in 'updater_gui' module. """ def __init__(self, component, download_destination_dirpath=''): InstallerQThread.__init__(self) self.component = component self.component.installer = self self.installation_status = None self.download_destination_dirpath = download_destination_dirpath self.download_destination_filepath = '' self.downloaded_filepath_list = [] # Initializes the 'install_mode'. self.install_mode = None def has_zipped_files(self): if [fp for fp in self.downloaded_filepath_list if is_compressed(fp)]: return True else: return False def download(self): '''This method performs the installation and must set the installation_status flag as True or False. 
It has to be overridden in subclasses.''' raise NotImplementedError def unzip_downloaded_files(self, destination_dirpath, explicit_list_of_files=None, in_thread=True): self._unzip_downloaded_files(destination_dirpath=destination_dirpath, explicit_list_of_files=explicit_list_of_files, in_thread=in_thread) def _unzip_downloaded_files(self, destination_dirpath, explicit_list_of_files=None, in_thread=True): # Shutil is the best choice here, bc calling tarfile or zipfile separately for each format is # error-prone, while shutil.unpack_archive handles every case on its own. # However, shutil does not support simple '.gz' compressed files. I have to register an unpack format, # named "bio_gz". Then, I provide a list of extensions corresponding to the format. # Cannot put '.gz' directly, because it may overlap with the .tar.gz format and I don't want to mess # with builtin modules. # Then, the method requires a callable that will be used to unpack archives. # The callable must receive the path of the archive, followed by the directory # the archive must be extracted to. This callable is created in this module, is the reimplementation of # the gunzip command, called gunzip_shutil. try: shutil.register_unpack_format("bio_gz", [".fasta.gz", ".hmm.gz", ".gz"], gunzip_shutil) except shutil.RegistryError: # if it is already registered, ignore it. pass # selecting compressed files if not explicit_list_of_files: zip_files = [fp for fp in self.downloaded_filepath_list if is_compressed(fp)] else: zip_files = explicit_list_of_files # decompressing for element in zip_files: if not os.path.exists(destination_dirpath): os.makedirs(destination_dirpath) # In Python 3.8, unpacking a tar.gz file in this QThread causes a # segmentation fault. They will be upacked later in the main thread. 
if in_thread and element.endswith(".tar.gz"): continue shutil.unpack_archive(element, destination_dirpath) os.remove(element) # cleaning compressed files self.downloaded_filepath_list.remove(element) def finish_generic_installation(self): """Concludes an installation with facultative additional actions, then sets the installation_status flag as True and the component installed path.""" self.additional_actions() # installation successful self.installation_status = "success" def additional_actions(self, *args, **kwargs): """Additional actions to perform after the download and the unzipping, in order to conclude the installation, e.g. the HMM database preparation with hmmpress""" # to be overridden. pass def build_destination_filepath(self, filename): new_filepath = os.path.join(self.download_destination_dirpath, filename) self.downloaded_filepath_list.append(new_filepath) self.download_destination_filepath = new_filepath return new_filepath class PyMod_FTP_component_installer(PyMod_component_installer): def __init__(self, component, destination_dirpath=''): PyMod_component_installer.__init__(self, component, destination_dirpath) self.hostname, self.subdir = self.get_host_and_subdir_from_component_url() def get_host_and_subdir_from_component_url(self): '''From the component.remote_source URL, retrieves the host name (es. "ftp.ebi.ac.uk") and the subdirectory path of the tool (es. 
"./pub/databases/Pfam/current_release/database_files/")''' remoteurl = self.component.remote_source hostname, subdir = remoteurl.replace('ftp://', '').split('/', 1) return hostname, subdir def ping(self): """this method checks for the server and retrieves the size of the compressed file(s)""" try: ftp = FTP(self.hostname, timeout=10) ftp.login() if self.subdir: ftp.cwd(self.subdir) files_on_server = self.get_files_on_server(ftp) item_basenames = self.get_item_basenames(files_on_server) self.set_total_size(ftp, item_basenames) # sets the self.total_size attribute ftp.quit() return True except Exception as e: # traceback.print_exc() self.emit_signal("set_update_status", self.component, "Connection Error (%s)" % e, "red") return False def ftp_connect(self, host, subdirectory_path=None): """this method connects to the FTP server and downloads the files, handling signals and connection closing. Instruction to download each specific database file must be written in 'retrieve_files' method.""" ftp = FTP(host, timeout=30) ftp.login() if subdirectory_path: ftp.cwd(subdirectory_path) # emitting the 'set_update_status' signal to show in the table that the donwload has started. self.emit_signal("set_update_status", self.component, "Downloading...", "light_green") # this method actually downloads the files and must be implemented in subclasses self.retrieve_files(ftp) ftp.quit() def set_total_size(self, ftp_connection, filenames): total_size = 0 ftp_connection.sendcmd("TYPE i") # Switch to Binary mode total_size = sum([ftp_connection.size(filename) for filename in filenames]) ftp_connection.sendcmd("TYPE A") # Switch back to ASCII mode self.total_size = total_size self.emit_signal("retrieved_size", self.component, total_size) return total_size def retrieve_files(self, ftp_connection): '''this method must provide the code for the download of the correct files and return the downloaded file path. 
It has to be overridden in subclasses.''' raise NotImplementedError def download_and_install(self): """ This is the method that is called externally. It calls 'ftp_connect' and handles its exceptions. After the download has finished, it proceeds to prepare the database files for PyMod (that is, it proceeds to the "installation" of the database). """ # Downloads the component files. try: self.ftp_connect(self.hostname, subdirectory_path=self.subdir) # Once the download has been completed, updates the GUI. self.installation_status = "downloaded" self.emit_signal("set_update_status", self.component, "Downloaded", "light_green") except Exception as e: # traceback.print_exc() messagestr = 'Error during the download of %s via FTP: %s' % (self.component.full_name, e) self.emit_signal("critical_error", messagestr, self.component) return None # "Installs" the component files. try: # Unzips the files. if self.has_zipped_files(): self.emit_signal("set_update_status", self.component, "Unpacking...", "light_green") self.unzip_downloaded_files(self.component.target_installation_path) # Performs additional actions. if self.installation_status == "stopped": return None self.finish_generic_installation() # Set the last downloaded time. 
self.component.last_downloaded = get_formatted_date() self.emit_signal("set_update_status", self.component, "Completed", "green") self.emit_signal("terminated_installation", self.component) except Exception as e: # traceback.print_exc() messagestr = 'Error during the installation of %s: %s' % (self.component.full_name, e) self.emit_signal("critical_error", messagestr, self.component) # subclasses class Pfam_db_installer(PyMod_FTP_component_installer): pfam_hmm_filename = "Pfam-A.hmm" def __init__(self, component, destination_dirpath=''): PyMod_FTP_component_installer.__init__(self, component, destination_dirpath) self.hmmpress_exe_filepath = None def get_files_on_server(self, ftp_connection): listdir = [] ftp_connection.dir(listdir.append) return listdir def get_item_basenames(self, listdir): """ Searches for the 'Pfam-A.hmm.gz' file in the ftp directory. """ for item in listdir: item_basename = item.split()[-1] if item_basename == self.pfam_hmm_filename + '.gz': return [item_basename] raise ValueError("HMM file not found on PFAM server.") def retrieve_files(self, ftp_connection): listdir = self.get_files_on_server(ftp_connection) item_basename = self.get_item_basenames(listdir) download_ftp_with_batches(ftp_connection=ftp_connection, ftp_source='RETR ' + item_basename[0], dest_filepath=self.build_destination_filepath(item_basename[0]), installer=self) def additional_actions(self): # message = ("The Pfam database has been successfully downloaded. It will" # " now be decompressed using the HMMPRESS tool. Please wait as" # " it might take a little while.") self.emit_signal("set_update_status", self.component, "Running hmmpress...", "light_green") # Remove the databases files. Their presence will prevent hmmpress to build the new database. 
for ext in (".h3m", ".h3i", ".h3f", ".h3p"): db_filepath = os.path.join(self.component.target_installation_path, self.pfam_hmm_filename + ext) if os.path.isfile(db_filepath): os.remove(db_filepath) # Actually launches hmmpress to complete the database installation. database_filepath = os.path.join(self.component.target_installation_path, self.pfam_hmm_filename) try: assert os.path.exists(self.hmmpress_exe_filepath) extract_hmmer_databases(database_filepath, self.hmmpress_exe_filepath) #little loop to ensure that ALL the files are present in the directory #they are big and the following command must not be executed if the extraction is incomplete extracted_files = [i for i in os.listdir(self.component.target_installation_path) if not i.endswith('.hmm') and not i.startswith('.')] while len(extracted_files) < 4: time.sleep(0.5) os.remove(database_filepath) except: # traceback.print_exc() message = ("Error in decompressing Pfam databases with HMMPRESS.") raise Exception(message) def extract_hmmer_databases(hmmer_data_filepath, hmmpress_exepath): """Indexing HMM databases with hmmpress""" cline = [hmmpress_exepath, hmmer_data_filepath] print("- Executing the following command:", cline) subprocess.check_call(cline) class BLAST_db_installer(PyMod_FTP_component_installer): def __init__(self, component, destination_dirpath=''): PyMod_FTP_component_installer.__init__(self, component, destination_dirpath) def get_files_on_server(self, ftp_connection): listdir = [] ftp_connection.retrlines('MLSD', listdir.append) listdir = [i for i in listdir if not i.endswith('md5')] # throw away the md5 checksumm files self.blastdbs = [] for name_of_db in self.component.subdir_name: self.blastdbs.extend([i for i in listdir if name_of_db in i]) return self.blastdbs def get_item_basenames(self, listdir): if self.blastdbs: item_basenames = [item.split(';')[-1].strip() for item in self.blastdbs] return item_basenames else: return [] def retrieve_files(self, ftp_connection): 
self.get_files_on_server(ftp_connection) item_basenames = self.get_item_basenames(self.blastdbs) if item_basenames: for item_basename in item_basenames: download_ftp_with_batches(ftp_connection=ftp_connection, ftp_source='RETR ' + item_basename, dest_filepath=self.build_destination_filepath(item_basename), installer=self) else: raise NotImplementedError del self.blastdbs def unzip_downloaded_files(self, base_destination_dirpath, explicit_list=None, in_thread=True): # selecting compressed files zip_files = [fp for fp in self.downloaded_filepath_list if is_compressed(fp)] zip_dict = {} for subd_name in self.component.subdir_name: zip_dict.update({subd_name: [f for f in zip_files if subd_name in f]}) for k in zip_dict.keys(): new_subdir = os.path.join(base_destination_dirpath, k) self._unzip_downloaded_files(new_subdir, zip_dict[k], in_thread=in_thread) # adjustment of files # another for loop bc the unzipping MUST be finished, if included in the previous for loop # raises errors and neglects some files for k in zip_dict.keys(): new_subdir = os.path.join(base_destination_dirpath, k) content = [l for l in os.listdir(new_subdir) if not l.startswith('.')] tab_files = [f for f in content if f.startswith('tax')] for tf in tab_files: os.remove(os.path.join(new_subdir, tf)) if self.component.name != 'blast_databases': for file_to_move in content: dest_filepath = os.path.join(base_destination_dirpath, file_to_move+'.fasta') if os.path.isfile(dest_filepath): os.remove(dest_filepath) os.rename(os.path.join(new_subdir, file_to_move), dest_filepath) os.removedirs(new_subdir) def download_ftp_with_batches(ftp_connection, ftp_source, dest_filepath, installer): """ Downloads an FTP file, and for each downloaded batch checks whether the thread has stopped if it has, the download is also stopped. This system allows to quit a download thread without abruptly terminating the thread (which might freeze the entire application). 
""" with open(dest_filepath, 'wb') as fh: def ftp_callback(batch_data): if installer.installation_status != "stopped": fh.write(batch_data) else: raise TerminateQThread("Stop download") resp = ftp_connection.retrbinary(ftp_source, callback=ftp_callback) class TerminateQThread(Exception): pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/structural_databases_protocols/__init__.py
.py
26,630
558
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import gzip import urllib.request import time from pymod_lib.pymod_os_specific import check_network_connection from pymod_lib.pymod_structure import Parsed_pdb_file from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_threading import Protocol_exec_dialog from pymod_lib.pymod_exceptions import PyModInterruptedProtocol ################################################################################################### # Fetch PDB files. # ################################################################################################### class Fetch_structure_file(PyMod_protocol): """ Class for downloading a PDB file from the sequences retrieved from BLAST. """ protocol_name = "fetch_pdb" def __init__(self, pymod, output_directory=None, mode=None, structures_to_fetch=None, import_mode=None): PyMod_protocol.__init__(self, pymod, output_directory) self.mode = mode self.structures_to_fetch = structures_to_fetch self.import_mode = import_mode def initialize_from_gui(self, mode, structures_to_fetch): # Builds a list of structures to be fetched. self.structures_to_fetch = [] if mode == "single": self.structures_to_fetch.append(structures_to_fetch) elif mode == "selection": self.structures_to_fetch.extend(self.pymod.get_selected_sequences()) def launch_from_gui(self): """ Let the user choose the way in which to retrieve the structures. """ if not check_network_connection("https://www.google.com"): message = ("Could not connect to the Internet to download structure files" " from the PDB. 
Please check your Internet connection.") self.pymod.main_window.show_error_message("Connection Error", message) return None self.fetch_pdb_dialog = Fetch_pdb_dialog(self.pymod.main_window, pymod=self.pymod) self.fetch_pdb_dialog.exec_() # Gets the import mode from the GUI and then fetch the files. self.import_mode = self.fetch_pdb_dialog.fetch_choice # Interrupt the process if users close the dialog window. if self.import_mode not in ("single-chain", "multiple-chains"): return None self.fetch_pdb_files() def fetch_pdb_files(self): """ This method will call other methods to tetch the PDB files and load the corresponding elements in PyMod. """ # List which will store information for each downloaded file. self.fetch_results_list = [] try: if not self.pymod.use_protocol_threads: self.download_all_pdb_files() else: label_text = ("Connecting to the PDB to download %s. Please wait for" " completion..." % self.get_seq_text(self.structures_to_fetch, "PDB file")) p_dialog = Protocol_exec_dialog(app=self.pymod.main_window, pymod=self.pymod, function=self.download_all_pdb_files, args=(), title="Downloading PDB files", wait_start=0.15, wait_end=0.15, label_text=label_text) p_dialog.exec_() except PyModInterruptedProtocol: return None # Warns the user if some structure files could not be fetched. n_failures = len([r for r in self.fetch_results_list if r["failure"]]) if n_failures != 0: title = "Download Error" message = ("Can not access the PDB database to download %s structures out of %s." "\nPlease check your Internet connection or if the PDB ids of the" " structures are valid." % (n_failures, len(self.structures_to_fetch))) self.pymod.main_window.show_warning_message(title, message) if n_failures == len(self.fetch_results_list): return None # Actually loads the elements in PyMod. 
for fetch_results in self.fetch_results_list: if not fetch_results["failure"]: try: self._load_structure(fetch_results) except Exception as e: title = "Associate Structure Failure" message = ("The structure association for %s chain %s failed because" " of the following error: %s" % (fetch_results["pdb_code"], fetch_results["pdb_chain_id"], str(e))) self.pymod.main_window.show_error_message(title, message) self.pymod.main_window.gridder(clear_selection=True, update_elements=True, update_clusters=True) def download_all_pdb_files(self): """ Downloads all the structure files from the PDB. """ for element in self.structures_to_fetch: fetch_results = self._fetch_single_element(element) self.fetch_results_list.append(fetch_results) def _fetch_single_element(self, old_element): """ Download the PDB file for a single element. """ pdb_code = str(old_element.pdb_id) pdb_chain_id = str(old_element.pdb_chain) fetch_results = {"pdb_code": pdb_code, "pdb_chain_id": pdb_chain_id, "element": old_element} try: pdb_file_shortcut = fetch_structure_file(pdb_code=pdb_code, output_dir=self.pymod.temp_directory_dirpath) fetch_results["failure"] = False fetch_results["pdb_file_shortcut"] = pdb_file_shortcut except IOError: fetch_results["failure"] = True return fetch_results def _load_structure(self, fetch_results): """ Parses a downloaded PDB file, builds PyMod elements for its chains and loads the corresponding elements in PyMod/PyMOL. """ old_element = fetch_results["element"] pdb_code = fetch_results["pdb_code"] pdb_file_shortcut = fetch_results["pdb_file_shortcut"] pdb_chain_id = fetch_results["pdb_chain_id"] #---------------------------------------------------------------------------- # Load in PyMod only the chain corresponding to the hit sequence and adjust - # its length to the region identified by BLAST. 
- #---------------------------------------------------------------------------- if self.import_mode == "single-chain": a = Associate_structure(self.pymod, old_element) a.associate(pdb_file_shortcut, pdb_chain_id) #---------------------------------------------------------------------- # Load each chain found in the PDB file where the 3D structure of the - # hit sequence is present. - #---------------------------------------------------------------------- elif self.import_mode == "multiple-chains": # Deletes the original hit sequence retrieved by BLAST and replaces it with # a new element with an associated structure loaded in PyMOL. old_element.delete() # Builds 'Pymod_elements' objects for each chain present in the PDB file. new_elements = self.pymod.open_structure_file(os.path.abspath(pdb_file_shortcut)) # Color other chains by gray. other_chains_elements = [e for e in new_elements if e.get_chain_id() != pdb_chain_id] if other_chains_elements: self.pymod.main_window.color_selection("multiple", other_chains_elements, "regular", regular_color="gray") class Structure_file_fetcher: """ Class to fetch a PDB file from the internet. """ def __init__(self, pdb_code, output_dir, new_name=None): self.pdb_code = pdb_code self.output_dir = output_dir # Form the pdb output name if new_name: self.output_name = '%s.pdb' % new_name else: self.output_name = '%s.pdb' % pdb_code def fetch(self): """ Actually retrieves the PDB file from the internet. 
""" pdb_url = "https://files.rcsb.org/download/%s.pdb.gz" % self.pdb_code temp_gzip_file_name = urllib.request.urlretrieve(pdb_url)[0] open_gzip_file = gzip.open(temp_gzip_file_name) # Uncompress the file while reading output_path = os.path.join(self.output_dir, self.output_name) saved_file = open(output_path, 'wb') saved_file.write(open_gzip_file.read()) # Write pdb file open_gzip_file.close() saved_file.close() return output_path def fetch_structure_file(pdb_code, output_dir, new_name=None): sf = Structure_file_fetcher(pdb_code, output_dir, new_name=new_name) return sf.fetch() ################################################################################################### # Associate structures to PyMod elements. # ################################################################################################### from pymol import cmd from pymod_lib.pymod_seq.seq_manipulation import global_pairwise_alignment class Associate_structure(PyMod_protocol): """ Once 'build_structure_objects()' has been used, this will edit the 'PyMod_element' and 'Structure' objects corresponding to the 'chain_id' according to the original element sequence. Usually this is used when fetching a PDB file corresponding to some hit from a BLAST search, because hits in HSPs may have a shorter sequence with respect to the full PDB chain. This method can be called to crop the full 3D chain according to the hit sequence in the HSP. """ temp_full_name = "pymod_full_temp" temp_fragment_name = "pymod_fragment_temp" protocol_name = "associate_structure" def __init__(self, pymod, pymod_element, permissive=True): PyMod_protocol.__init__(self, pymod, output_directory=pymod.temp_directory_dirpath) # Directory in which to save the temporary files. self.temp_directory = self.pymod.temp_directory_dirpath self.target_element = pymod_element self.associate_pdb_file = None self.permissive = permissive def associate(self, pdb_file_path, chain_id): """ Actually associates the structure. 
""" self._set_options(pdb_file_path, chain_id) # Parses the source structure file. if not self.associate_pdb_file: self.associate_pdb_file = Parsed_pdb_file(self.pymod, self.original_pdb_file_path, copy_original_file=False, save_chains_files=False) #----------------------------------------------------------------------- # Check if the pymod element can be associated to the structure chain. - #----------------------------------------------------------------------- if not self.chain_id in self.associate_pdb_file.get_chains_ids(): raise KeyError("The structure file '%s' does not have chain '%s'." % (self.original_pdb_file_path, self.chain_id)) structure_chain_element = self.associate_pdb_file.get_pymod_element_by_chain(self.chain_id) # Check if the the target sequence and the sequence of the structure to associate match by # aligning the two sequences using dynamic programming. ali = global_pairwise_alignment(self.target_element.my_sequence.replace("-", ""), structure_chain_element.my_sequence.replace("-", ""), toss_modres=True) # If the sequences do not match, interrupt the process. if not self.permissive: if ali["id"] < 99.9: raise ValueError("The target sequence does not match with the sequence of the structure to associate (sequence identity percentage = %s)." % ali["id"]) #------------------------------------------------------------------------------------- # Gets information about matching and missing residues in the two aligned sequences. - #------------------------------------------------------------------------------------- pc = 0 # Alignment position counter. hc = 0 # Target residue counter. tc = 0 # PDB structure residue counter. matching_positions = [] # list of matching positions. missing_positions = [] # list of missing residues in the pdb structure with respect to the target sequence. 
for hr, tr in zip(ali["seq1"], ali["seq2"]): if hr != "-" and tr != "-" and hr == tr: matching_positions.append({"pc": pc, "hc": hc, "tc": tc}) if tr == "-" and hr != "-": missing_positions.append({"pc": pc, "hc": hc, "tc": tc}) if hr != "-": hc += 1 if tr != "-": tc += 1 pc += 1 # Gets the starting and ending positions (using the PDB numeration) that will be used to # crop the 3D structure. start_position = structure_chain_element.get_residue_by_index(matching_positions[0]["tc"]).db_index end_position = structure_chain_element.get_residue_by_index(matching_positions[-1]["tc"]).db_index #------------------------------------------------ # Use PyMOL to build the new cropped structure. - #------------------------------------------------ # First loads the full PDB structure of the chain in PyMOL. cmd.load(self.original_pdb_file_path, self.temp_full_name) # Select amino acid residues ranging from the starting and ending positions which define # the fragment to "excise". cmd.select(self.temp_fragment_name, "resi %s-%s and chain %s and object %s and not hetatm" % (start_position, end_position, self.chain_id, self.temp_full_name)) # Join the selections and save a file PDB file of the cropped fragment. pdb_filename = os.path.splitext(os.path.basename(self.original_pdb_file_path))[0] # structure_chain_element.get_structure_file_root() pdb_basename = "%s_cropped.pdb" % pdb_filename cropped_structure_file_shortcut = os.path.join(self.temp_directory, pdb_basename) cmd.save(cropped_structure_file_shortcut, self.temp_fragment_name) # Clean up the selections. cmd.delete(self.temp_full_name) cmd.delete(self.temp_fragment_name) #---------------------------------------------------------------------------------- # Builds a 'Parsed_pdb_file' object for the PDB file of the structure just saved. 
- #---------------------------------------------------------------------------------- p = Parsed_pdb_file(self.pymod, os.path.abspath(cropped_structure_file_shortcut), output_directory=self.pymod.structures_dirpath) new_element_with_structure = p.get_pymod_element_by_chain(self.chain_id) adjust_sequence = self.target_element.my_sequence self.pymod.replace_element(self.target_element, new_element_with_structure) #-------------------------------------------------------------------------------------- # Updates the sequence of the fragment to keep it in frame with the original sequence - # by including the target sequence indels. - #-------------------------------------------------------------------------------------- list_of_missing_positions = [p["hc"] for p in missing_positions] new_sequence = [] adc = 0 for i, p in enumerate(adjust_sequence): if adc in list_of_missing_positions: new_sequence.append("-") else: # Right now modified residues are not included in the cropped structures, # this prevents them from being included in the chain sequence. if p != "X": new_sequence.append(p) else: new_sequence.append("-") if p != "-": adc += 1 new_sequence = "".join(new_sequence) # If the old and new sequences do not match, then align them. This situation does not occur # often, and this procedure might introduce alignment errors (but it will not make the # plugin crash). if new_element_with_structure.my_sequence.replace("-", "") != new_sequence.replace("-", ""): ali_new = global_pairwise_alignment(adjust_sequence, new_element_with_structure.my_sequence, toss_modres=True) new_sequence = ali_new["seq2"] new_element_with_structure.set_sequence(new_sequence, permissive=False) def _set_options(self, pdb_file_path, chain_id): self.original_pdb_file_path = pdb_file_path self.chain_id = chain_id ################################################################# # Launch from the GUI. 
# ################################################################# def launch_from_gui(self): # Builds a new window. self.associate_structure_window = Associate_structure_window( parent = self.pymod.get_pymod_app(), protocol = self, title = "Associate Structure", upper_frame_title = "Associate 3D Structure Options", submit_command = self.associate_structure_state) # This will be set to 'True' once the users select a valid PDB file and press the 'SUBMIT' # button. self._select_associate_chain = False def associate_structure_state(self): """ Called when users press the 'SUBMIT' window of the 'Associate Structure' protocol. This is actually both when selecting the structure file path and when selecting a chain of the file. """ #----------------------------------------------------------------- # Checks if a correct structure file has been provided as input. - #----------------------------------------------------------------- if not self._select_associate_chain: if not self.associate_structure_window.check_general_input(): return False self.pdb_file_path_from_gui = self.associate_structure_window.get_structure_file() if not os.path.isfile(self.pdb_file_path_from_gui): self.pymod.main_window.show_error_message("File Error", "Please select a valid file path.") return False if not self.pymod.is_valid_structure_file(self.pdb_file_path_from_gui, show_error=False): self.pymod.main_window.show_error_message("File Type Error", "Please select a valid PDB file.") return False # Parses the structure file. self.associate_pdb_file = Parsed_pdb_file(self.pymod, self.pdb_file_path_from_gui, copy_original_file=False, save_chains_files=False) # Gets its chains. 
self.available_chains = self.associate_pdb_file.get_chains_ids() self.associate_structure_window.show_chain_selection_frame() self._select_associate_chain = True #---------------------------------------------------------------------------------------- # If a valid structure file has been provided, this will try to associate the structure - # of the chain specified in the combobox to the target element. - #---------------------------------------------------------------------------------------- elif self._select_associate_chain: try: self.associate(self.pdb_file_path_from_gui, self.associate_structure_window.get_structure_chain()) self.associate_structure_window.destroy() self.pymod.main_window.gridder(update_elements=True, update_clusters=True) # except Exception, e: except Exception: title = "Associate Structure Failure" # message = "The structure association failed because of the following error: %s" % e message = "The structure association failed because of an error" self.pymod.main_window.show_error_message(title, message) ################################################################################################### # GUI. # ################################################################################################### from pymol.Qt import QtWidgets # from pymod_lib.pymod_gui.shared_gui_components_qt import * class Fetch_pdb_dialog(QtWidgets.QDialog): """ Dialog to select the way in which structure files downloaded from the PDB have to be fetched. """ is_pymod_window = True def __init__(self, parent, pymod, *args, **kwargs): super(Fetch_pdb_dialog, self).__init__(parent, *args, **kwargs) self.initUI() self.fetch_choice = None self.pymod = pymod def initUI(self): self.setWindowTitle('Import PDB Options') vertical_layout = QtWidgets.QVBoxLayout() # Installation options label. 
info_text = "Please select the 3D structure import mode:" self.fetch_info_label = QtWidgets.QLabel(info_text, self) # self.fetch_info_label.setStyleSheet(label_style_1) vertical_layout.addWidget(self.fetch_info_label) vertical_layout.addStretch(1) # Import options radiobuttons. horizontal_layout = QtWidgets.QHBoxLayout() self.import_all_radiobutton = QtWidgets.QRadioButton("Import in PyMod the structure of every chain of the PDB files.") # self.import_all_radiobutton.setChecked(True) # self.import_all_radiobutton.setStyleSheet(label_font_1) vertical_layout.addWidget(self.import_all_radiobutton) self.import_fragment_radiobutton = QtWidgets.QRadioButton("Import in PyMod only the structure of the hit sequences fragments.") # label_font_1 self.import_fragment_radiobutton.setStyleSheet("margin-bottom: 10px") vertical_layout.addWidget(self.import_fragment_radiobutton) # Import fragments button. self.import_button = QtWidgets.QPushButton("Import 3D Structures", self) # self.import_button.setStyleSheet(label_style_2) self.import_button.clicked.connect(self.on_import_button_click) horizontal_layout.addWidget(self.import_button) horizontal_layout.addStretch(1) # Cancel button. 
self.cancel_button = QtWidgets.QPushButton('Cancel', self) # self.cancel_button.setStyleSheet(label_style_2) self.cancel_button.clicked.connect(self.on_cancel_button_click) horizontal_layout.addWidget(self.cancel_button) vertical_layout.addLayout(horizontal_layout) self.setLayout(vertical_layout) def on_cancel_button_click(self): self.fetch_choice = None self.close() def on_import_button_click(self): if self.import_all_radiobutton.isChecked(): self.fetch_choice = "multiple-chains" time.sleep(0.1) self.close() elif self.import_fragment_radiobutton.isChecked(): self.fetch_choice = "single-chain" time.sleep(0.1) self.close() else: self.fetch_choice = None message = ("Please select one out of two available import modes in order" " to download the PDB files.") self.pymod.main_window.show_warning_message("Warning", message) # import pymod_lib.pymod_vars as pmdt # from pymod_lib.pymod_gui import shared_gui_components # # class Associate_structure_window(shared_gui_components.PyMod_tool_window): # # def __init__(self, parent = None, protocol = None, **configs): # shared_gui_components.PyMod_tool_window.__init__(self, parent=parent , **configs) # self.current_protocol = protocol # # An entryfield to select the structure file. # self.structure_file_enf = shared_gui_components.PyMod_path_entryfield(self.midframe, # label_text = "Select Structure File", # label_style = shared_gui_components.label_style_1, # path_type = "file", # file_types = pmdt.all_structure_file_types_atl, # askpath_title = "Select Structure File") # self.structure_file_enf.pack(**shared_gui_components.pack_options_1) # self.add_widget_to_align(self.structure_file_enf) # self.add_widget_to_validate(self.structure_file_enf) # self.align_widgets(15) # # def get_structure_file(self): # return self.structure_file_enf.getvalue() # # # def show_chain_selection_frame(self): # # Removes the entryfield to select the structure file. 
# self.structure_file_enf.pack_forget() # # # Displays a combobox to select the chain id of corresponind to the structure to be # # associated with the target sequence. # self.chain_selection_cbx = shared_gui_components.PyMod_combobox(self.midframe, # label_text = 'Select Chain to Associate', # label_style = shared_gui_components.label_style_1, # scrolledlist_items=self.current_protocol.available_chains) # self.chain_selection_cbx.pack(**shared_gui_components.pack_options_1) # self.chain_selection_cbx.selectitem(0) # self.add_widget_to_align(self.chain_selection_cbx) # self.align_widgets(15) # # def get_structure_chain(self): # return self.chain_selection_cbx.get()
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/clustalo.py
.py
6,834
179
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Clustal Omega. """ import os from Bio.Align.Applications import ClustalOmegaCommandline # Protocols. from ._clustal_common import Clustal_regular_alignment, Clustal_profile_alignment # GUI. from ._base_alignment._gui import Regular_alignment_window_qt, Profile_alignment_window_qt from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_entryfield_qt, PyMod_radioselect_qt class Clustalomega_alignment: """ General Clustal Omega alignments. """ alignment_program = "clustalo" protocol_name = "clustalo" def additional_initialization(self): self.tool = self.pymod.clustalo def get_options_from_gui(self): self.params_from_gui = {} error_from_gui = False self.params_from_gui["use_full_dm"] = self.alignment_window.get_use_full_dm_value() try: self.params_from_gui["iterations"] = self.alignment_window.get_iterations_value() except Exception as e: error_from_gui = True error_message = str(e) # "Invalid Combined Iterations Value." if error_from_gui: self.pymod.main_window.show_error_message("Parameters Error", error_message) return False else: return True class Clustalomega_regular_alignment(Clustalomega_alignment, Clustal_regular_alignment): """ Regular alignments using Clustal Omega. 
""" def get_alignment_window_class_qt(self): return Clustalo_regular_window_qt def run_regular_alignment_program(self, sequences_to_align, output_file_name): self.run_clustalo(sequences_to_align, output_file_name=output_file_name, extraoption="", iterations=self.params_from_gui["iterations"], use_full_dm=self.params_from_gui["use_full_dm"]) def run_clustalo(self, sequences_to_align, output_file_name=None, extraoption="", iterations=0, use_full_dm=False): self.pymod.build_sequence_file(sequences_to_align, output_file_name, unique_indices_headers=True) input_file_path = os.path.join(self.pymod.alignments_dirpath, output_file_name + ".fasta") output_file_path = os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln") guidetree_file_path = os.path.join(self.pymod.alignments_dirpath, output_file_name + ".dnd") cline = ClustalOmegaCommandline( self.tool["exe_file_path"].get_value(), infile= input_file_path, outfile= output_file_path, guidetree_out=guidetree_file_path, force=True, outfmt="clustal") cline = str(cline) if iterations != 0: cline = "%s --iter=%s" % (cline, iterations) if use_full_dm: cline = "%s --full --full-iter" % (cline) # Run MSA with all sequences using CLustalO command line. self.pymod.execute_subprocess(cline) class Clustalomega_profile_alignment(Clustalomega_alignment, Clustal_profile_alignment): """ Profile alignments for Clustal Omega. 
""" def get_alignment_window_class_qt(self): return Clustalo_profile_window_qt def prepare_sequence_to_profile_commandline(self, profile_file_shortcut, sequences_to_add_file_shortcut, output_file_shortcut): clustalo_path = self.tool["exe_file_path"].get_value() cline='"' +clustalo_path+'"'+ \ ' --profile1="' +profile_file_shortcut+'"'+ \ ' --outfile="' +output_file_shortcut+'.aln"'+ \ ' --outfmt=clustal --force'+ \ ' '# +self.alignment_window.get_extraoption_value() if len(self.elements_to_add)>1: cline+=' --infile="' +sequences_to_add_file_shortcut+'"' else: cline+=' --profile2="'+sequences_to_add_file_shortcut+'"' if self.params_from_gui["iterations"] != 0: cline = "%s --iter=%s" % (cline, self.params_from_gui["iterations"]) if self.params_from_gui["use_full_dm"]: cline = "%s --full --full-iter" % (cline) return cline def prepare_profile_to_profile_commandline(self, profile1, profile2, output_file_shortcut): clustalo_path = self.tool["exe_file_path"].get_value() cline='"' +clustalo_path+'"' \ ' --profile1="' +profile1+'"'+ \ ' --profile2="' +profile2+'"'+ \ ' --outfile="' +output_file_shortcut+'.aln"' \ ' --outfmt=clustal --force' \ ' '# +self.alignment_window.get_extraoption_value() if self.params_from_gui["iterations"] != 0: cline = "%s --iter=%s" % (cline, self.params_from_gui["iterations"]) if self.params_from_gui["use_full_dm"]: cline = "%s --full --full-iter" % (cline) return cline ################################################################################################### # GUI. # ################################################################################################### class Clustalo_base_window_qt: """ Base class for ClustalOmega protocols. """ def build_algorithm_options_widgets(self): # Use full distance matrix. 
self.use_full_dm_rds = PyMod_radioselect_qt(label_text="Use Full Distance Matrix", buttons=('Yes', 'No')) self.use_full_dm_rds.setvalue("No") self.middle_formlayout.add_widget_to_align(self.use_full_dm_rds) # Number of (combined guide-tree/HMM) iterations. self.clustalo_iterations_enf = PyMod_entryfield_qt(label_text="Combined Iterations", value='0', validate={'validator': 'integer', 'min': 0, 'max': 5}) self.middle_formlayout.add_widget_to_align(self.clustalo_iterations_enf) self.middle_formlayout.set_input_widgets_width("auto") def get_iterations_value(self): return self.clustalo_iterations_enf.getvalue(validate=True) def get_use_full_dm_value(self): return self.use_full_dm_rds.getvalue() == "Yes" class Clustalo_regular_window_qt(Clustalo_base_window_qt, Regular_alignment_window_qt): pass class Clustalo_profile_window_qt(Clustalo_base_window_qt, Profile_alignment_window_qt): pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/muscle.py
.py
3,872
93
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ MUSCLE. """ import os from Bio.Align.Applications import MuscleCommandline # Protocols. from ._base_alignment._base_regular_alignment import Regular_sequence_alignment # GUI. from ._base_alignment._gui import Regular_alignment_window_qt from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_radioselect_qt class MUSCLE_alignment: alignment_program = "muscle" def additional_initialization(self): self.tool = self.pymod.muscle def get_options_from_gui(self): self.params_from_gui = {} self.params_from_gui["muscle_mode"] = self.alignment_window.get_muscle_mode() return True class MUSCLE_regular_alignment(MUSCLE_alignment, Regular_sequence_alignment): protocol_name = "muscle" def get_alignment_window_class_qt(self): return MUSCLE_regular_window_qt def run_regular_alignment_program(self, sequences_to_align, output_file_name): self.run_muscle(sequences_to_align, output_file_name=output_file_name, muscle_mode=self.params_from_gui["muscle_mode"]) def run_muscle(self, sequences_to_align, output_file_name, muscle_mode): """ This method allows to interact with the local MUSCLE. """ # TODO: to insert the following options: # - guide tree from: # - none # - first iteration # - second iteration self.pymod.build_sequence_file(sequences_to_align, output_file_name, unique_indices_headers=True) # Input FASTA for MUSCLE. infasta=os.path.join(self.pymod.alignments_dirpath, output_file_name + ".fasta") # Output FASTA from MUSCLE, in tree order. outfasta_tree=os.path.join(self.pymod.alignments_dirpath, output_file_name + ".out_fasta") # Output ALN. 
outaln=os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln") muscle_exec = self.tool["exe_file_path"].get_value() if muscle_mode == "highest_accuracy": cline = MuscleCommandline(muscle_exec, input=infasta, out=outfasta_tree, clwout=outaln) elif muscle_mode == "large_datasets": cline = MuscleCommandline(muscle_exec, input=infasta, out=outfasta_tree, clwout=outaln, maxiters=2) elif muscle_mode == "fastest": cline = MuscleCommandline(muscle_exec, input=infasta, out=outfasta_tree, clwout=outaln, maxiters=1, diags=True, sv=True, distance1="kbit20_3") else: raise KeyError(muscle_mode) self.pymod.execute_subprocess(str(cline)) class MUSCLE_regular_window_qt(Regular_alignment_window_qt): def build_algorithm_options_widgets(self): # MUSCLE mode radioselect (for more information see: https://www.drive5.com/muscle/manual/). self.muscle_modes = ["Highest Accuracy", "Large Datasets", "Fastest"] self.muscle_modes_short = ["highest_accuracy", "large_datasets", "fastest"] self.muscle_modes_dict = dict((k, v) for (k, v) in zip(self.muscle_modes, self.muscle_modes_short)) self.muscle_mode_rds = PyMod_radioselect_qt(label_text="MUSCLE Mode", buttons=self.muscle_modes) self.muscle_mode_rds.setvalue(self.muscle_modes[0]) self.middle_formlayout.add_widget_to_align(self.muscle_mode_rds) self.middle_formlayout.set_input_widgets_width("auto") def get_muscle_mode(self): return self.muscle_modes_dict[self.muscle_mode_rds.getvalue()]
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/clustalw.py
.py
7,335
175
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
ClustalW.

Protocol and GUI classes for running ClustalW regular and profile alignments
in PyMod.
"""

import os

from Bio.Align.Applications import ClustalwCommandline

# Protocols.
from ._clustal_common import Clustal_regular_alignment, Clustal_profile_alignment
# GUI.
from ._base_alignment._gui import Regular_alignment_window_qt, Profile_alignment_window_qt
from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_radioselect_qt, PyMod_entryfield_qt


class Clustalw_alignment:
    """
    General Clustal W alignments. Mixin providing tool initialization and
    options-window parsing shared by regular and profile ClustalW protocols.
    """

    protocol_name = "clustalw"

    def additional_initialization(self):
        # The ClustalW tool object configured in PyMod's options.
        self.tool = self.pymod.clustalw

    def get_options_from_gui(self):
        """
        Read the scoring matrix and gap penalty values from the options window
        into 'self.params_from_gui'. Shows an error dialog and returns False if
        any of the values is invalid; returns True on success.
        """
        self.params_from_gui = {}
        error_from_gui = False
        try:
            self.params_from_gui["selected_matrix"] = self.alignment_window.get_matrix_value()
        except Exception as e:
            error_from_gui = True
            error_message = "Invalid Matrix."
        try:
            self.params_from_gui["gapopen_value"] = float(self.alignment_window.get_gapopen_value())
        except Exception as e:
            error_from_gui = True
            error_message = str(e) # "Invalid Gap Open Value."
        try:
            self.params_from_gui["gapextension_value"] = float(self.alignment_window.get_gapextension_value())
        except Exception as e:
            error_from_gui = True
            error_message = str(e) # "Invalid Gap Extension Value."

        # NOTE: only the last error message encountered is reported to the user.
        if error_from_gui:
            self.pymod.main_window.show_error_message("Parameters Error", error_message)
            return False
        else:
            return True


class Clustalw_regular_alignment(Clustalw_alignment, Clustal_regular_alignment):
    """Regular (non-profile) multiple sequence alignment performed with ClustalW."""

    def get_alignment_window_class_qt(self):
        return Clustalw_regular_window_qt

    def run_regular_alignment_program(self, sequences_to_align, output_file_name):
        self.run_clustalw(sequences_to_align,
                          output_file_name=output_file_name,
                          matrix=self.params_from_gui["selected_matrix"],
                          gapopen=self.params_from_gui["gapopen_value"],
                          gapext=self.params_from_gui["gapextension_value"])

    def run_clustalw(self, sequences_to_align, output_file_name, matrix="blosum", gapopen=10, gapext=0.2):
        """
        This method allows to interact with the local ClustalW.

        :param sequences_to_align: PyMod elements to align.
        :param output_file_name: basename for the .fasta input and .aln output
            written in the alignments directory.
        :param matrix: ClustalW scoring matrix series name.
        :param gapopen: gap opening penalty.
        :param gapext: gap extension penalty.
        """
        # First build an input FASTA file containing the sequences to be aligned.
        self.pymod.build_sequence_file(sequences_to_align, output_file_name, unique_indices_headers=True)
        # Sets the full paths of input and output files.
        input_file_path = os.path.join(self.pymod.alignments_dirpath, output_file_name + ".fasta")
        output_file_path = os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln")
        # Run an alignment with all the sequences using ClustalW command line, through Biopython.
        # 'outorder="INPUT"' keeps the sequences in input order in the output file.
        cline = ClustalwCommandline(self.pymod.clustalw["exe_file_path"].get_value(),
                                    infile=input_file_path, outfile=output_file_path,
                                    outorder="INPUT",
                                    matrix=matrix, gapopen=gapopen, gapext=gapext)
        self.pymod.execute_subprocess(str(cline))


class Clustalw_profile_alignment(Clustalw_alignment, Clustal_profile_alignment):
    """Profile alignments (sequence-to-profile and profile-to-profile) with ClustalW."""

    def get_alignment_window_class(self):
        # NOTE(review): 'Clustalw_profile_window' is not defined in this module
        # (only 'Clustalw_profile_window_qt' is), so calling this would raise a
        # NameError. Presumably a leftover from the pre-Qt GUI -- confirm that
        # this method is never invoked.
        return Clustalw_profile_window

    def get_alignment_window_class_qt(self):
        return Clustalw_profile_window_qt

    def prepare_sequence_to_profile_commandline(self, profile_file_shortcut, sequences_to_add_file_shortcut, output_file_shortcut):
        """
        Build the ClustalW command line string aligning new sequences
        (-SEQUENCES) to an existing profile. Paths are quoted to survive spaces.
        """
        clustalw_path = self.tool["exe_file_path"].get_value()
        cline='"' +clustalw_path+'"'+ \
            ' -PROFILE1="'+profile_file_shortcut+'"'+ \
            ' -PROFILE2="'+sequences_to_add_file_shortcut+'" -SEQUENCES -OUTORDER=INPUT'+ \
            ' -MATRIX=' +self.params_from_gui["selected_matrix"] + \
            ' -GAPOPEN=' +str(self.params_from_gui["gapopen_value"]) + \
            ' -GAPEXT=' +str(self.params_from_gui["gapextension_value"]) + \
            ' -OUTFILE="' +output_file_shortcut+'.aln"'
        return cline

    def prepare_profile_to_profile_commandline(self, profile1, profile2, output_file_shortcut):
        """
        Build the ClustalW command line string aligning two profiles
        (both given as alignment files) to each other.
        """
        clustalw_path = self.tool["exe_file_path"].get_value()
        cline='"' +clustalw_path+'"' \
            ' -PROFILE1="' +profile1+'"'+ \
            ' -PROFILE2="' +profile2+'" -OUTORDER=INPUT' \
            ' -MATRIX=' +self.params_from_gui["selected_matrix"]+ \
            ' -GAPOPEN=' +str(self.params_from_gui["gapopen_value"])+ \
            ' -GAPEXT=' +str(self.params_from_gui["gapextension_value"])+ \
            ' -OUTFILE="' +output_file_shortcut+'.aln"'
        return cline


###################################################################################################
# GUI.                                                                                            #
###################################################################################################

class Clustalw_base_window_qt:
    """
    Base class for ClustalW protocols. Provides the matrix and gap penalty
    option widgets shared by the regular and profile alignment windows.
    """

    def build_algorithm_options_widgets(self):
        # Scoring matrix radioselect.
        self.clustal_matrices = ["Blosum", "Pam", "Gonnet", "Id"]
        self.clustal_matrices_dict = {"Blosum": "blosum", "Pam": "pam",
                                      "Gonnet": "gonnet", "Id": "id"}
        self.matrix_rds = PyMod_radioselect_qt(label_text="Scoring Matrix Selection",
                                               buttons=self.clustal_matrices)
        self.matrix_rds.setvalue("Blosum")
        self.middle_formlayout.add_widget_to_align(self.matrix_rds)

        # Gap open entryfield.
        self.gapopen_enf = PyMod_entryfield_qt(label_text="Gap Opening Penalty",
                                               value="10.0",
                                               validate={'validator': 'real',
                                                         'min': 0, 'max': 1000})
        self.middle_formlayout.add_widget_to_align(self.gapopen_enf)

        # Gap extension entryfield.
        self.gapextension_enf = PyMod_entryfield_qt(label_text="Gap Extension Penalty",
                                                    value="0.2",
                                                    validate={'validator': 'real',
                                                              'min': 0, 'max': 1000})
        self.middle_formlayout.add_widget_to_align(self.gapextension_enf)

        self.middle_formlayout.set_input_widgets_width("auto")

    def get_matrix_value(self):
        """Return the internal name of the selected scoring matrix."""
        return self.clustal_matrices_dict[self.matrix_rds.getvalue()]

    def get_gapopen_value(self):
        return self.gapopen_enf.getvalue(validate=True)

    def get_gapextension_value(self):
        return self.gapextension_enf.getvalue(validate=True)


class Clustalw_regular_window_qt(Clustalw_base_window_qt, Regular_alignment_window_qt):
    pass


class Clustalw_profile_window_qt(Clustalw_base_window_qt, Profile_alignment_window_qt):
    pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/salign_seq.py
.py
15,705
316
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
SALIGN sequence alignments (regular and profile) performed through MODELLER.
"""

import os
from Bio import SeqIO

import pymod_lib.pymod_vars as pmdt
from pymod_lib.pymod_seq.seq_io import convert_sequence_file_format
from ._base_alignment._base_regular_alignment import Regular_sequence_alignment
from ._base_alignment._base_profile_alignment import Profile_alignment
from ._salign_common import SALIGN_alignment, SALIGN_regular_alignment
from ._base_alignment._gui import Regular_alignment_window_qt, Profile_alignment_window_qt
from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_radioselect_qt

# MODELLER may not be installed; availability is checked by the protocol itself.
try:
    import modeller
except:
    pass


class SALIGN_seq_alignment(SALIGN_alignment):
    """
    Mixin class for SALIGN sequence alignments (both regular and profile alignments).
    """

    alignment_program = "salign-seq"

    def additional_initialization(self):
        # The MODELLER tool object configured in PyMod's options.
        self.tool = self.pymod.modeller

    def get_options_from_gui(self):
        """Read from the options window whether to use structural information."""
        self.use_str_information = self.alignment_window.get_use_str_information_var()
        return True

    def check_structural_information(self):
        """
        Disable the use of structural information if any element has an unknown
        residue ("X") or is a nucleic acid, since SALIGN's structure-dependent
        gap penalties cannot handle those cases.
        """
        if self.use_str_information:
            for e in self.elements_to_align:
                if "X" in e.my_sequence:
                    print("- WARNING: could not use structural information in this alignment because of an unknown residue in '%s'." % (e.my_header))
                    self.use_str_information = False
                    break
                if e.polymer_type == "nucleic_acid":
                    print("- WARNING: could not use structural information in this alignment because '%s' is a nucleic acid element." % (e.my_header))
                    self.use_str_information = False
                    break

    def run_regular_alignment_program(self, sequences_to_align, output_file_name,
                                      use_parameters_from_gui=True, use_structural_information=False):
        """
        Entry point for a regular SALIGN alignment. When 'use_parameters_from_gui'
        is True, the structural-information flag from the GUI overrides the
        'use_structural_information' argument.
        """
        self.check_structural_information()
        if use_parameters_from_gui:
            use_structural_information = self.use_str_information
        self.run_salign_malign(sequences_to_align, output_file_name, use_structural_information)

    def run_salign_malign(self, sequences_to_align, output_file_name, use_structural_information):
        """
        alignment.malign - align sequences
        alignment.align2d - sequence-structure alignment

        Writes a .pir input file, runs MODELLER's salign, then converts the
        resulting .ali file to a clustal .aln file.
        """
        shortcut_to_temp_files = os.path.join(self.pymod.alignments_dirpath, output_file_name)
        # The .pir file will be written in a different way if the user decides to use
        # structural information in the alignment.
        self.pymod.build_sequence_file(self.elements_to_align, output_file_name, file_format="pir",
                                       unique_indices_headers=True,
                                       use_structural_information=use_structural_information)

        modeller.log.minimal()
        env = modeller.environ()
        env.io.atom_files_directory = ['.', self.pymod.structures_dirpath]
        if self.use_hetatm:
            env.io.hetatm = True
        aln = modeller.alignment(env, file=shortcut_to_temp_files +".ali", alignment_format='PIR')
        if use_structural_information:
            env.libs.topology.read(file="$(LIB)/top_heav.lib")
            # # Structure sensitive variable gap penalty alignment:
            # aln.salign(auto_overhang=True,
            #     gap_penalties_1d=(-100, 0),
            #     gap_penalties_2d=(3.5,3.5,3.5,.2,4.,6.5,2.,0.,0.),
            #     gap_function=True, # structure-dependent gap penalty
            #     feature_weights=(1., 0., 0., 0., 0., 0.),
            #     similarity_flag=True,
            #     alignment_type='tree', #output='ALIGNMENT',
            #     dendrogram_file=shortcut_to_temp_files+".tree")
            aln.salign(rr_file='$(LIB)/as1.sim.mat', # Substitution matrix used
                       # output='',
                       max_gap_length=20,
                       gap_function=True, # If False then align2d not done
                       feature_weights=(1., 0., 0., 0., 0., 0.),
                       gap_penalties_1d=(-100, 0),
                       gap_penalties_2d=(3.5, 3.5, 3.5, 0.2, 4.0, 6.5, 2.0, 0.0, 0.0),
                       similarity_flag=True, # Ensuring that the dynamic programming
                                             # matrix is not scaled to a difference matrix
                       alignment_type="tree",
                       dendrogram_file=shortcut_to_temp_files+".tree")
        else:
            aln.salign(auto_overhang=True,
                       gap_penalties_1d=(-450, 0),
                       alignment_type='tree',
                       output='ALIGNMENT',
                       dendrogram_file=shortcut_to_temp_files+".tree")
        aln.write(file=shortcut_to_temp_files +'.ali', alignment_format='PIR')

        # Convert output_file_name.ali to alignment_tmp.fasta.
        record = SeqIO.parse(shortcut_to_temp_files + ".ali", "pir")
        SeqIO.write(record, shortcut_to_temp_files + ".aln", "clustal")

    def salign_profile_profile_alignment(self, output_file_name="al_result", use_structural_information=False):
        """
        Iteratively align each remaining profile in 'self.profiles_to_join_file_list'
        to the first one using MODELLER's salign in PROFILE/PAIRWISE mode, then
        convert the accumulated PIR alignment to a clustal file.
        """
        profile1_name = self.profiles_to_join_file_list[0]+".ali"
        profile1_shortcut = os.path.join(self.pymod.alignments_dirpath, profile1_name)

        modeller.log.minimal()
        env = modeller.environ()
        env.io.atom_files_directory = ['.', self.pymod.structures_dirpath]
        if self.use_hetatm:
            env.io.hetatm = True
        env.libs.topology.read(file="$(LIB)/top_heav.lib")

        for profile2 in [os.path.join(self.pymod.alignments_dirpath, e+".ali") for e in self.profiles_to_join_file_list[1:]]:
            # cat profile2 to profile1 and return number of sequences
            # in the original profile1
            with open(profile1_shortcut,'r') as p_fh:
                ali_txt1 = p_fh.read()
            with open(profile2,'r') as p_fh:
                ali_txt2 = p_fh.read()
            # Number of sequences in the first profile ("align_block" below).
            align_block = len([e for e in ali_txt1.splitlines() if e.startswith('>')])
            with open(profile1_shortcut, 'w') as p_fh:
                p_fh.write(ali_txt1+ali_txt2)

            aln = modeller.alignment(env, file=profile1_shortcut, alignment_format="PIR")
            if use_structural_information:
                env.libs.topology.read(file='$(LIB)/top_heav.lib')
                aln.salign(rr_file='${LIB}/blosum62.sim.mat',
                           gap_penalties_1d=(-500, 0), output='',
                           align_block=align_block, #max_gap_length=20,
                           align_what='PROFILE', alignment_type="PAIRWISE",
                           comparison_type='PSSM',
                           gap_function=True,#structure-dependent gap penalty
                           feature_weights=(1., 0., 0., 0., 0., 0.),
                           gap_penalties_2d=(.35,1.2,.9,1.2,.6,8.6,1.2,0.,0.),
                           similarity_flag=True,
                           substitution=True,smooth_prof_weight=10.0)
            else:
                aln.salign(rr_file='${LIB}/blosum62.sim.mat',
                           gap_penalties_1d=(-500, 0), output='',
                           align_block=align_block, # no. of seqs. in first MSA
                           align_what='PROFILE',
                           alignment_type='PAIRWISE',
                           comparison_type='PSSM',
                           similarity_flag=True, substitution=True,
                           smooth_prof_weight=10.0) # For mixing data with priors
            #write out aligned profiles (MSA)
            aln.write(file=profile1_shortcut, alignment_format="PIR")

        convert_sequence_file_format(profile1_shortcut, "pir", "clustal", output_filename=output_file_name)

    def update_aligned_sequences(self):
        self.update_aligned_sequences_inserting_modres()


###################################################################################################
# SALIGN sequence alignments.                                                                     #
###################################################################################################

class SALIGN_seq_regular_alignment(SALIGN_regular_alignment, SALIGN_seq_alignment, Regular_sequence_alignment):
    """Regular SALIGN sequence alignment protocol."""

    protocol_name = "salign-seq"

    def get_alignment_window_class_qt(self):
        return SALIGN_seq_regular_window_qt


###################################################################################################
# SALIGN sequence alignments.                                                                     #
###################################################################################################

class SALIGN_seq_profile_alignment(SALIGN_seq_alignment, Profile_alignment):
    """Profile SALIGN sequence alignment protocol (sequence-to-profile and profile-to-profile)."""

    protocol_name = "salign-seq"

    def get_alignment_window_class_qt(self):
        return SALIGN_seq_profile_window_qt

    def run_sequence_to_profile_alignment_program(self):
        """
        Append the selected sequences to the target cluster: first a fast
        sequence-only MSA ranks the new sequences by identity to the target
        profile, then each one is joined by profile-profile alignment.
        """
        # List of sequences of profile to be kept (target cluster)
        target_cluster_element = self.selected_clusters_list[self.target_cluster_index]
        alignment_to_keep_elements = target_cluster_element.get_children()
        # Used by generate_highest_identity_pairs_list
        self.selected_sequences_in_target_alignment = alignment_to_keep_elements

        # List of the selected sequences to be appended to target cluster.
        self.elements_to_add = [e for e in self.pymod.get_selected_sequences() if not e in alignment_to_keep_elements]

        #-----------------------------------------------------------------------------------------
        # Perform a first sequence alignment between all selected sequences and sequences in the -
        # target cluster.                                                                        -
        #-----------------------------------------------------------------------------------------
        initial_alignment_name = "all_temporary"
        self.elements_to_align = alignment_to_keep_elements + self.elements_to_add
        self.check_structural_information()

        # Perform sequence alignment even if sequence-structure alignment was requested, because the
        # former is signficantly faster.
        self.run_regular_alignment_program(self.elements_to_align, initial_alignment_name,
                                           use_parameters_from_gui=False,
                                           use_structural_information=False)

        #-----------------------------------------------------------------------------------------
        # For each sequence to be appended to the alignment, finds the most similiar sequence in -
        # the target cluster according to previous multiple sequence alignment.                  -
        #-----------------------------------------------------------------------------------------
        highest_identity_pairs_list = self.generate_highest_identity_pairs_list(initial_alignment_name)
        max_identity_list = [max(row) for row in highest_identity_pairs_list] # map(max, highest_identity_pairs_list)
        # Sort 'self.elements_to_add' according to 'max_identity_list'.
        max_identity_list, self.elements_to_add = zip(*sorted(zip(max_identity_list, self.elements_to_add),
                                                              reverse=True, key=lambda t: t[0]))

        #-------------------------------------
        # Construct a PIR format input file. -
        #-------------------------------------
        self.profiles_to_join_file_list=[]
        # The target profile first, then each new sequence as a one-element "profile".
        profiles=[alignment_to_keep_elements]+[[e] for e in self.elements_to_add]
        for (i,children) in enumerate(profiles):
            file_name = "cluster_" + str(i)
            self.pymod.build_sequence_file(children, file_name, file_format="pir", remove_indels=False,
                                           use_structural_information=self.use_str_information,
                                           unique_indices_headers=True)
            self.profiles_to_join_file_list.append(file_name)

        #-----------------------------------------------------------------------------------
        # Sequentially apply profile-profile alignment to each element of elements_to_add. -
        #-----------------------------------------------------------------------------------
        profile_alignment_output = "al_result"
        self.salign_profile_profile_alignment(output_file_name=profile_alignment_output,
                                              use_structural_information=self.use_str_information)

        self.build_elements_to_align_dict(self.elements_to_align)
        self.protocol_output_file_name = profile_alignment_output

    def run_profile_to_profile_alignment_program(self):
        """
        Join all the selected clusters into a single alignment by repeated
        profile-profile SALIGN alignments.
        """
        # Sequences in selected clusters will all be aligned. Sequences not in selected clusters,
        # will not be aligned.
        for cluster in self.selected_clusters_list:
            self.elements_to_align += list(cluster.get_children())
        self.check_structural_information()

        self.profiles_to_join_file_list=[] # two MSA files

        for (i,cluster) in enumerate(self.selected_clusters_list):
            file_name = "cluster_" + str(i) # Build FASTA with the MSAs.
            children = cluster.get_children()
            # Builds a series of alignment files for each selected cluster.
            # self.pymod.build_sequence_file(children, file_name, file_format="clustal", remove_indels = False, unique_indices_headers=True)
            self.pymod.build_sequence_file(children, file_name, file_format="pir", remove_indels = False,
                                           use_structural_information=self.use_str_information,
                                           unique_indices_headers=True)
            self.profiles_to_join_file_list.append(file_name)

        profile_alignment_output = "al_result"
        output_file_shortcut=os.path.join(self.pymod.alignments_dirpath, profile_alignment_output)
        profile1=os.path.join(self.pymod.alignments_dirpath, self.profiles_to_join_file_list[0]+".aln")

        self.salign_profile_profile_alignment(profile_alignment_output,
                                              use_structural_information=self.use_str_information)

        self.build_elements_to_align_dict(self.elements_to_align)
        self.protocol_output_file_name = profile_alignment_output


###################################################################################################
# GUI.                                                                                            #
###################################################################################################

class SALIGN_seq_base_window_qt:
    """Options window mixin for SALIGN sequence alignments."""

    def build_algorithm_options_widgets(self):
        if self.protocol.structures_are_selected:
            # Use structure information to guide sequence alignment.
            self.salign_seq_struct_rds = PyMod_radioselect_qt(label_text="Use structural information",
                                                              buttons=('Yes', 'No'))
            self.salign_seq_struct_rds.setvalue("No")
            self.middle_formlayout.add_widget_to_align(self.salign_seq_struct_rds)
        self.middle_formlayout.set_input_widgets_width("auto")

    def get_use_str_information_var(self):
        """Return True/False for the structural information option (False if no structures selected)."""
        if self.protocol.structures_are_selected:
            return pmdt.yesno_dict[self.salign_seq_struct_rds.getvalue()]
        else:
            return False


class SALIGN_seq_regular_window_qt(SALIGN_seq_base_window_qt, Regular_alignment_window_qt):
    pass


class SALIGN_seq_profile_window_qt(SALIGN_seq_base_window_qt, Profile_alignment_window_qt):
    pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/ce_alignment.py
.py
9,990
245
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
Module for performing CE-alignments in PyMod.
"""

import os
import shutil

from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

from pymol import cmd

from pymod_lib.pymod_seq.seq_star_alignment import save_cstar_alignment
from ._base_alignment._base_regular_alignment import Regular_structural_alignment
from ._base_alignment._gui import Regular_alignment_window_qt, Structural_alignment_base_window_qt
from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_combobox_qt


##################################################################################################
# CE alignment.                                                                                  #
##################################################################################################

class CEalign_alignment:
    """Mixin shared by CE-alignment protocols."""

    alignment_program = "ce"

    def additional_initialization(self):
        # CE-align is built into PyMOL, so there is no external tool object.
        self.tool = None

    def alignment_program_exists(self):
        """
        In PyMOL 2 CE-align is always present.
        """
        return True

    def alignment_program_not_found(self):
        title = "CE-alignment Error"
        message = "CE-alignment is not available on your PyMod installation. If you want to use this function please see CE-alignment installation instructions on PyMod's User Guide."
        self.pymod.main_window.show_error_message(title, message)

    def update_aligned_sequences(self):
        self.update_aligned_sequences_inserting_modres()


class CEalign_regular_alignment(CEalign_alignment, Regular_structural_alignment):
    """Regular structural alignment performed with PyMOL's CE-align."""

    protocol_name = "ce"

    def get_alignment_window_class_qt(self):
        return CEalign_regular_window_qt

    def run_regular_alignment_program(self, sequences_to_align, output_file_name):
        """
        Reorder the elements so that the user-chosen reference structure becomes
        the center star, then run the CE alignment.
        """
        # Change the order of the 'sequences_to_align' so that the center star is the first element
        # in the list.
        _sequences_to_align = sequences_to_align[:]
        reference_id = self.alignment_window.get_reference_id()
        reference_element = _sequences_to_align.pop(reference_id)
        _sequences_to_align.insert(0, reference_element)
        # Run the alignment.
        self.run_ce_alignment(_sequences_to_align, output_file_name=output_file_name)

    def run_ce_alignment(self, structures_to_align, output_file_name, use_seq_info=False):
        """
        Used to launch Ce_align.

        Pairwise-aligns every structure to the first one, then joins the
        pairwise alignments with a center-star strategy into a single .aln file.
        """
        backup_list = structures_to_align[:]

        #-----------------------------------------------------------------------------
        # Align the first two structures and produces an ce_temp.txt alignment file. -
        #-----------------------------------------------------------------------------
        temp_ceali_prefix = "ce_temp"
        temp_ceali = temp_ceali_prefix + "_0"
        temp_ceali_list = [temp_ceali]
        current_elements_to_align = backup_list[0:2]
        self.ce_align(current_elements_to_align, output_file_name=temp_ceali)

        if len(backup_list) > 2:
            max_row = 0
            #-------------------------------------------------------------------
            # Align the rest of the structures to the first one progressively. -
            #-------------------------------------------------------------------
            mceali_count = 1
            for n in range(2, len(backup_list)):
                current_elements_to_align = [backup_list[0], backup_list[n]]
                temp_ceali_n = "%s_%s" % (temp_ceali_prefix, mceali_count)
                self.ce_align(current_elements_to_align, output_file_name=temp_ceali_n)
                temp_ceali_list.append(temp_ceali_n)
                mceali_count += 1

            #-------------------------------------------------------------------------
            # Join the alignments using a modification of the center star alignment. -
            #-------------------------------------------------------------------------
            # center_star_id = backup_list[0].get_unique_index_header()
            aligned_pairs = []
            seqs = []
            all_ids = []
            # Builds a list of pairwise alignments.
            for temp_ceali_i_id, temp_ceali_i in enumerate(temp_ceali_list):
                c0_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, temp_ceali_i + ".aln"), "clustal"))
                aligned_pairs.append([str(c0_recs[0].seq), str(c0_recs[1].seq)])
                # The center star sequence is taken only once, from the first pair.
                if temp_ceali_i_id == 0:
                    seqs.append(str(c0_recs[0].seq).replace("-", ""))
                    all_ids.append(c0_recs[0].id)
                seqs.append(str(c0_recs[1].seq).replace("-", ""))
                all_ids.append(c0_recs[1].id)
            # Pad with 'None' placeholders for the non-center pairs, which were
            # not explicitly aligned (expected by 'save_cstar_alignment').
            for ei_id, ei in enumerate(backup_list[1:]):
                for ej in backup_list[ei_id+1:]:
                    if ei is ej:
                        continue
                    aligned_pairs.append(None)

            # Joins the alignments.
            save_cstar_alignment(seqs=seqs, all_ids=all_ids, pairwise_alis=aligned_pairs,
                                 output_filepath=os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln"))

            #-----------------------------------------------------------------------------------
            # Complete by cleaning up the temporary files and by creating a final output file. -
            #-----------------------------------------------------------------------------------
            for temp_ceali_n in temp_ceali_list:
                os.remove(os.path.join(self.pymod.alignments_dirpath, temp_ceali_n + ".aln"))

        else:
            # Only two structures: the single pairwise alignment is the result.
            shutil.move(os.path.join(self.pymod.alignments_dirpath, temp_ceali + ".aln"),
                        os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln"))

    def ce_align(self, elements_to_align, output_file_name=None, use_seq_info=False):
        """
        Actually performs the structural alignment.

        Aligns the second element onto the first with PyMOL's 'cealign',
        saves the superposed structures and writes the sequence alignment
        as a clustal .aln file.
        """
        #----------------------------------------------------
        # Run CE-alignment using the PyMOL built-in module. -
        #----------------------------------------------------
        retain_order = True
        if retain_order:
            cmd.set("retain_order", 1)

        sel1 = elements_to_align[0].get_pymol_selector()
        sel2 = elements_to_align[1].get_pymol_selector()

        # Sets temporary names.
        tsel1 = "t" + elements_to_align[0].get_unique_index_header()
        tsel2 = "t" + elements_to_align[1].get_unique_index_header()
        cmd.set_name(sel1, tsel1)
        cmd.set_name(sel2, tsel2)

        # Actually performs the alignment.
        a = cmd.cealign(target=tsel1, mobile=tsel2, object="pymod_temp_cealign")
        # cmd.center('%s and %s' % (tsel1, tsel2))
        # cmd.zoom('%s and %s' % (tsel1, tsel2))

        # Updates the names of the chains PDB files and saves these new files.
        saved_file1 = sel1 + "_aligned.pdb"
        saved_file2 = sel2 + "_aligned.pdb"
        # elements_to_align[0].structure.chain_pdb_file_name = saved_file1
        # elements_to_align[1].structure.chain_pdb_file_name = saved_file2
        cmd.save(os.path.join(self.pymod.structures_dirpath, saved_file1), tsel1)
        cmd.save(os.path.join(self.pymod.structures_dirpath, saved_file2), tsel2)

        # Finally saves the structural alignment between the sequences.
        cmd.save(os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln"), "pymod_temp_cealign")
        cmd.delete("pymod_temp_cealign")

        # Converts it in .aln format.
        recs = SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln"), "clustal")
        new_recs = []
        for rec, pymod_element in zip(recs, (elements_to_align[0], elements_to_align[1])):
            # Strip the temporary leading "t" and keep the unique-index header part.
            new_rec_id = "_".join(rec.id[1:].split("_")[0:3])
            new_rec_seq = str(rec.seq).replace("?", "X") # Replaces modified residues.
            new_recs.append(SeqRecord(Seq(new_rec_seq), id=new_rec_id))
        SeqIO.write(new_recs, os.path.join(self.pymod.alignments_dirpath, output_file_name + ".aln"), "clustal")

        # Sets the names of the objects back to original ones.
        cmd.set_name(tsel1, sel1)
        cmd.set_name(tsel2, sel2)

        if retain_order:
            cmd.set("retain_order", 0)


###################################################################################################
# Classes for the GUI.                                                                            #
###################################################################################################

class CEalign_base_window_qt(Structural_alignment_base_window_qt):
    """Options window mixin for CE alignments."""

    def build_algorithm_options_widgets(self):
        # Reference structure combobox.
        if len(self.protocol.selected_elements) > 2:
            structures_list = [element.my_header for element in self.protocol.selected_elements]
            self.reference_combobox = PyMod_combobox_qt(label_text="Reference Structure",
                                                        items=structures_list)
            self.reference_combobox.combobox.setCurrentIndex(0)
            self.middle_formlayout.add_widget_to_align(self.reference_combobox)
        # RMSD options.
        self.build_rmsd_option()
        self.middle_formlayout.set_input_widgets_width(130)

    def get_reference_id(self):
        """Return the index of the chosen reference structure (0 as a fallback)."""
        try:
            return self.reference_combobox.get_index()
        except:
            print("- Warning: could not obtain the reference structure id.")
            return 0


class CEalign_regular_window_qt(CEalign_base_window_qt, Regular_alignment_window_qt):
    pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/__init__.py
.py
0
0
null
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_clustal_common.py
.py
6,622
131
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
Base classes shared by the ClustalW and Clustal Omega alignment protocols.
"""

import os
import shutil
import re

import pymod_lib.pymod_vars as pmdt
from ._base_alignment._base_regular_alignment import Regular_sequence_alignment
from ._base_alignment._base_profile_alignment import Profile_alignment


class Clustal_regular_alignment(Regular_sequence_alignment):
    """Regular alignment shared behavior for Clustal family programs."""

    def update_additional_information(self):
        """
        Sets the guide tree file path once the alignment has been performed.
        """
        if len(self.elements_to_align) > 2 and self.alignment_mode in ("build-new-alignment", "rebuild-old-alignment"):

            # Builds a permanent copy of the original temporary .dnd file.
            temp_dnd_file_path = os.path.join(self.pymod.alignments_dirpath, self.protocol_output_file_name+".dnd")
            new_dnd_file_path = os.path.join(self.pymod.alignments_dirpath,
                                             "%s_%s_guide_tree.dnd" % (self.pymod.alignments_files_names, self.alignment_element.unique_index))
            shutil.copy(temp_dnd_file_path, new_dnd_file_path)

            # Edit the new .dnd file to insert the actual names of the sequences.
            dnd_file_handler = open(new_dnd_file_path, "r")
            dnd_file_lines = dnd_file_handler.readlines()
            dnd_file_handler.close()
            new_dnd_file_lines = []
            for line in dnd_file_lines:
                # Replace every unique-index header occurring in the tree line
                # with the corresponding element's display header.
                for m in re.findall(pmdt.unique_index_header_regex, line):
                    line = line.replace(m, self.elements_to_align_dict[m].my_header)
                new_dnd_file_lines.append(line)
            dnd_file_handler = open(new_dnd_file_path, "w")
            for line in new_dnd_file_lines:
                dnd_file_handler.write(line)
            dnd_file_handler.close()

            # # ClustalO produces a .dnd file without changing the ":" characters in the name of the
            # # PDB chains and this gives problems in displaying the names when using Phylo. So the
            # # ":" characters have to be changed in "_".
            # if self.protocol_name == "clustalo":
            #     old_dnd_file = open(new_dnd_file_path,"rU")
            #     new_dnd_file_content = ''
            #     for dnd_item in old_dnd_file.readlines():
            #         if re.search(r"_Chain\:?\:",dnd_item):
            #             Chain_pos=dnd_item.find("_Chain:")+6
            #             dnd_item=dnd_item[:Chain_pos]+'_'+dnd_item[Chain_pos+1:]
            #         new_dnd_file_content+=dnd_item
            #     old_dnd_file.close()
            #     new_dnd_file = open(new_dnd_file_path,"w")
            #     new_dnd_file.write(new_dnd_file_content)
            #     new_dnd_file.close()

            self.alignment_element.set_tree_file_path(new_dnd_file_path)


class Clustal_profile_alignment(Profile_alignment):
    """Profile alignment shared behavior for Clustal family programs."""

    def run_sequence_to_profile_alignment_program(self):
        """
        Align sequences to a target profile by clustalw/clustalo.
        """
        # List of sequences belonging to profile to be kept (target cluster).
        target_cluster_element = self.selected_clusters_list[self.target_cluster_index]
        target_profile_elements = target_cluster_element.get_children()

        # List of sequences to be appended to target cluster.
        self.elements_to_add = [e for e in self.pymod.get_selected_sequences() if not e in target_profile_elements]

        # create target cluster file
        profile_file_name = "cluster_0"
        profile_file_shortcut=os.path.join(self.pymod.alignments_dirpath, profile_file_name+".fasta")
        self.pymod.build_sequence_file(target_profile_elements, profile_file_name,
                                       file_format="fasta", remove_indels=False, unique_indices_headers=True)

        # create sequence file for sequences to be appended to target cluster
        sequences_to_add_file_name = "cluster_1"
        sequences_to_add_file_shortcut=os.path.join(self.pymod.alignments_dirpath, sequences_to_add_file_name+".fasta")
        self.pymod.build_sequence_file(self.elements_to_add, sequences_to_add_file_name,
                                       file_format="fasta", remove_indels=True, unique_indices_headers=True)

        # Output file name.
        sequence_to_profile_output = "al_result"
        output_file_shortcut = os.path.join(self.pymod.alignments_dirpath, sequence_to_profile_output)

        # Actually run the sequence to profile alignment. The command line is
        # built by the program-specific subclass.
        cline = self.prepare_sequence_to_profile_commandline(profile_file_shortcut, sequences_to_add_file_shortcut, output_file_shortcut)
        self.pymod.execute_subprocess(cline)

        # Converts the .aln output file into a .txt file, that will be used to update the sequences
        # loaded in PyMod.
        self.build_elements_to_align_dict(target_profile_elements+self.elements_to_add)
        self.protocol_output_file_name = sequence_to_profile_output

    def run_profile_to_profile_alignment_program(self):
        """
        Merge all selected clusters by repeated pairwise profile-to-profile
        alignments, accumulating the result into a single .aln file.
        """
        # Sequences in selected clusters will all be aligned. Sequences not in selected clusters,
        # will not be aligned.
        for cluster in self.selected_clusters_list:
            self.elements_to_align += list(cluster.get_children())

        self.profiles_to_join_file_list=[] # two MSA files

        for (i,cluster) in enumerate(self.selected_clusters_list):
            file_name = "cluster_" + str(i) # Build FASTA with the MSAs.
            children = cluster.get_children()
            # Builds a series of alignment files for each selected cluster.
            self.pymod.build_sequence_file(children, file_name, file_format="clustal",
                                           remove_indels = False, unique_indices_headers=True)
            self.profiles_to_join_file_list.append(file_name)

        profile_alignment_output = "al_result"
        output_file_shortcut=os.path.join(self.pymod.alignments_dirpath, profile_alignment_output)
        profile1=os.path.join(self.pymod.alignments_dirpath, self.profiles_to_join_file_list[0]+".aln")

        # Align each subsequent profile onto the growing result; after the first
        # round, 'profile1' points at the accumulated output alignment.
        for profile2 in self.profiles_to_join_file_list[1:]:
            profile2=os.path.join(self.pymod.alignments_dirpath, profile2+".aln")
            cline = self.prepare_profile_to_profile_commandline(profile1, profile2, output_file_shortcut)
            self.pymod.execute_subprocess(cline)
            profile1=output_file_shortcut+'.aln'

        self.build_elements_to_align_dict(self.elements_to_align)
        self.protocol_output_file_name = profile_alignment_output
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_salign_common.py
.py
3,017
71
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import shutil import re import pymod_lib.pymod_vars as pmdt ################################################################################################### # SALIGN MIXINS. # ################################################################################################### class SALIGN_alignment: """ Mixin for all SALIGN alignments. """ use_hetatm = False def alignment_program_exists(self): modeller_error = self.pymod.modeller.check_exception() if modeller_error is not None: message = "In order to use SALIGN, MODELLER must be installed and configured correctly. %s" % modeller_error self.pymod.main_window.show_error_message("MODELLER Error", message) return None return True def alignment_program_not_found(self): """ This method does nothing, the error message is already showed in 'alignment_program_exists' in SALIGN protocols. """ pass class SALIGN_regular_alignment: """ Mixin for SALIGN regular alignments (both sequence and structural). """ def update_additional_information(self): """ Sets the dendrogram file path once the alignment has been performed. """ if len(self.elements_to_align) > 2 and self.alignment_mode in ("build-new-alignment", "rebuild-old-alignment"): # Builds a permanent copy of the original temporary .dnd file. temp_dnd_file_path = os.path.join(self.pymod.alignments_dirpath, self.protocol_output_file_name + ".tree") new_dnd_file_path = os.path.join(self.pymod.alignments_dirpath, "%s_%s_dendrogram.tree" % (self.pymod.alignments_files_names, self.alignment_element.unique_index)) if os.path.isfile(temp_dnd_file_path): shutil.copy(temp_dnd_file_path, new_dnd_file_path) else: return None # Edit the new .dnd file to insert the actual names of the sequences. 
dnd_file_handler = open(new_dnd_file_path, "r") dnd_file_lines = dnd_file_handler.readlines() dnd_file_handler.close() new_dnd_file_lines = [] for line in dnd_file_lines: for m in re.findall(pmdt.unique_index_header_regex, line): line = line.replace(m, self.elements_to_align_dict[m].my_header) new_dnd_file_lines.append(line) dnd_file_handler = open(new_dnd_file_path, "w") for line in new_dnd_file_lines: dnd_file_handler.write(line) dnd_file_handler.close() self.alignment_element.set_tree_file_path(new_dnd_file_path)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/salign_str.py
.py
6,506
139
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os from Bio import SeqIO from pymol import cmd try: import modeller except: pass from ._salign_common import SALIGN_alignment, SALIGN_regular_alignment from ._base_alignment._base_regular_alignment import Regular_structural_alignment from ._base_alignment._gui import Regular_alignment_window_qt, Structural_alignment_base_window_qt ################################################################################################### # SALIGN structural alignment. # ################################################################################################### class SALIGN_str_regular_alignment(SALIGN_regular_alignment, SALIGN_alignment, Regular_structural_alignment): alignment_program = "salign-str" protocol_name = "salign-str" def additional_initialization(self): self.tool = self.pymod.modeller def get_alignment_window_class_qt(self): return SALIGN_str_regular_window_qt def run_regular_alignment_program(self, sequences_to_align, output_file_name, use_parameters_from_gui=True, use_structural_information=False): if use_parameters_from_gui: pass self.run_salign_align3d(sequences_to_align, output_file_name) def run_salign_align3d(self, structures_to_align, output_file_name): """ alignment.malign3d - align structures """ # if len(structures_to_align)>2: # self.build_salign_dendrogram_menu=True # else: # salign only output dendrogram_file when there are 3 sequences or more # self.build_salign_dendrogram_menu=False shortcut_to_temp_files = os.path.join(self.pymod.current_project_dirpath,self.pymod.alignments_dirpath,output_file_name) struct_tup=list(range(0,len(structures_to_align))) for ii in range(0,len(structures_to_align)): struct_entry=structures_to_align[ii].get_pymol_selector() 
header = structures_to_align[ii].get_unique_index_header() chain_id=structures_to_align[ii].get_chain_id() struct_tup[ii]=(struct_entry,header,chain_id) # Change the working directory, so that the ouptut files will be created in the structures # directory. os.chdir(self.pymod.structures_dirpath) modeller.log.minimal() env = modeller.environ() aln = modeller.alignment(env) for (pdb_file_name, code, chain) in struct_tup: mdl = modeller.model(env, file=pdb_file_name, model_segment=("FIRST:"+chain,"LAST:"+chain)) aln.append_model(mdl, atom_files=pdb_file_name, align_codes=code) for (weights, write_fit, whole) in (((1., 0., 0., 0., 1., 0.), False, True), ((1., 0.5, 1., 1., 1., 0.), False, True), ((1., 1., 1., 1., 1., 0.), True, False)): aln.salign(rms_cutoff=3.5, normalize_pp_scores=False, rr_file="$(LIB)/as1.sim.mat", overhang=30, gap_penalties_1d=(-450, -50), gap_penalties_3d=(0, 3), gap_gap_score=0, gap_residue_score=0, dendrogram_file= shortcut_to_temp_files + ".tree", alignment_type="tree", feature_weights=weights, improve_alignment=True, fit=True, write_fit=write_fit, write_whole_pdb=whole,output="ALIGNMENT QUALITY") aln.write(file=shortcut_to_temp_files +".ali", alignment_format="PIR") aln.salign(rms_cutoff=1.0, normalize_pp_scores=False, rr_file='$(LIB)/as1.sim.mat', overhang=30, gap_penalties_1d=(-450, -50), gap_penalties_3d=(0, 3), gap_gap_score=0, gap_residue_score=0, dendrogram_file=shortcut_to_temp_files + '.tree', alignment_type='progressive', feature_weights=[0]*6, improve_alignment=False, fit=False, write_fit=True, write_whole_pdb=False,output='QUALITY') # Returns back to the project dir from the project/Structures directory. os.chdir(self.pymod.current_project_dirpath) # SALIGN does not superpose ligands. The generated "*_fit.pdb" # files are therefore ligandless. The following loop superposes # original structure to saligned structures, and replaces # "*_fit.pdb" files with the superposed liganded original structure. 
for pymod_element, (pdb_file_name_root, code, chain) in zip(structures_to_align, struct_tup): # Updates the name of the chains PDB files. fixed=os.path.join(self.pymod.structures_dirpath, pdb_file_name_root + "_fit.pdb") pymod_element.set_current_chain_file(os.path.join(self.pymod.current_project_dirpath, self.pymod.structures_dirpath, pdb_file_name_root + "_fit.pdb")) cmd.load(fixed,"salign_fixed_fit") if hasattr(cmd,"super"): # super is sequence-independent cmd.super(pdb_file_name_root,"salign_fixed_fit") else: # PyMOL 0.99 does not have cmd.super cmd.align(pdb_file_name_root,"salign_fixed_fit") cmd.set("retain_order", 1) cmd.save(fixed, pdb_file_name_root) # quick-and-dirty cmd.set("retain_order", 0) cmd.delete("salign_fixed_fit") # Convert the PIR format output file into a clustal format file. record = SeqIO.parse(shortcut_to_temp_files + '.ali', "pir") SeqIO.write(record, shortcut_to_temp_files + ".aln", "clustal") def update_aligned_sequences(self): self.update_aligned_sequences_inserting_modres() def update_additional_information(self): SALIGN_regular_alignment.update_additional_information(self) Regular_structural_alignment.update_additional_information(self) class SALIGN_str_base_window_qt(Structural_alignment_base_window_qt): def build_algorithm_options_widgets(self): self.build_rmsd_option() self.middle_formlayout.set_input_widgets_width("auto") class SALIGN_str_regular_window_qt(SALIGN_str_base_window_qt, Regular_alignment_window_qt): pass
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_base_alignment/_base_regular_alignment.py
.py
27,738
578
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Regular alignments. """ import os from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from pymod_lib import pymod_vars from pymod_lib.pymod_protocols.alignment_protocols._base_alignment import Alignment_protocol from pymod_lib.pymod_seq.seq_star_alignment import save_cstar_alignment, join_alignments from pymod_lib.pymod_gui.shared_gui_components_qt import askyesno_qt class Regular_alignment(Alignment_protocol): alignment_strategy = "regular-alignment" ################################################################# # Start the alignment process. # ################################################################# def check_sequences_level(self): """ This method is used to ask the user a confirmation before performing an alignment in certain situations (for example when building an alignment only with sequences belonging to the same cluster). """ proceed_with_alignment = False self.clusters_are_involved = False self.rebuild_single_alignment_choice = False self.extract_siblings_choice = False # Only root sequences are involved. if len(self.involved_clusters_list) == 0 and len(self.selected_root_sequences_list) > 0: proceed_with_alignment = True # Only one cluster and not external sequences are involved. if len(self.involved_clusters_list) == 1 and len(self.selected_root_sequences_list) == 0: # If there is only one cluster selected with all its elements: the user might want to # rebuild an alignment with all its elements. if set(self.selected_clusters_list) == set(self.involved_clusters_list): proceed_with_alignment = True self.rebuild_single_alignment_choice = proceed_with_alignment # Only a subset of all the elements in a clster are selected. 
else: title = "Extract children?" message = "Would you like to extract the selected children and build a new alignment?" proceed_with_alignment = askyesno_qt(title, message, parent=self.pymod.get_qt_parent()) self.extract_siblings_choice = proceed_with_alignment # Multiple clusters are involved. elif len(self.involved_clusters_list) > 0: self.clusters_are_involved = True proceed_with_alignment = True return proceed_with_alignment def check_alignment_joining_selection(self): """ Used to check if there is a right selection in order to perform the Alignment Joiner algorithm to join two or more clusters. """ if len(self.selected_root_sequences_list) != 0: return False correct_selection = False if len(self.involved_clusters_list) > 1: # Check that there is only one selected children per cluster. too_many_children_per_cluster = False for cluster in self.involved_clusters_list: if not self.pymod.check_only_one_selected_child_per_cluster(cluster): too_many_children_per_cluster = True break if too_many_children_per_cluster: correct_selection = False else: correct_selection = True else: correct_selection = False return correct_selection ################################################################# # Perform the alignment. # ################################################################# def define_alignment_mode(self): """ Gets parameters from the GUI in order to define the alignment mode. """ self.alignment_mode = self.alignment_window.get_alignment_mode() # Takes the index of the target cluster for the "keep-previous-alignment" mode. if self.alignment_mode == "keep-previous-alignment": self.target_cluster_index = None # If there is only one cluster involved its index its going to be 0. if len(self.involved_clusters_list) == 1: self.target_cluster_index = 0 # Cluster index. # Get the index of the cluster from the combobox. Right now it is not implemented. 
# else: # self.target_cluster_index = self.keep_previous_alignment_frame.get_selected_cluster_index() def set_alignment_output_file_name(self, output_file_name=None): """ If the "alignment_file_name" argument is set to "None" (this happens when performing a new alignment from the PyMod main menu), this method will automatically generate a name for it, using the standard "self.pymod.alignments_files_names" value. """ if not output_file_name: # Alignment files ending with the unique_id of the alignment are going to be created. output_file_name = "temp_%s_%s" % (self.pymod.alignments_files_names, self.pymod.unique_index) return output_file_name def perform_alignment_protocol(self, output_file_name=None): """ Actually performs the alignment. """ if self.alignment_mode in ("build-new-alignment", "rebuild-old-alignment"): self.elements_to_align = self.pymod.get_selected_sequences() self.protocol_output_file_name = self.set_alignment_output_file_name(output_file_name) self.build_elements_to_align_dict(self.elements_to_align) self.perform_regular_alignment(self.elements_to_align, self.protocol_output_file_name) elif self.alignment_mode == "keep-previous-alignment": self.align_and_keep_previous_alignment() elif self.alignment_mode == "alignment-joining": self.perform_alignment_joining() ################################ # Regular alignments protocol. # ################################ def perform_regular_alignment(self, sequences_to_align, output_file_name, alignment_program=None): """ Perform a new sequence (or structural) alignment with the algorithm provided in the "alignment_program" argument. This method can be used in other parts of the plugin independently of the whole process initiated when performing an alignment using the commands in the 'Tools' menu in the PyMod main menu. """ self.run_regular_alignment_program(sequences_to_align, output_file_name) ################################################################## # Methods for the "keep previous alignment" mode. 
# ################################################################## def align_and_keep_previous_alignment(self): """ Align one selected element to a cluster by aligning it to an anchor sequence in the cluster. This mode is useful when the user is manually building an alignment and wants to append some sequences to a cluster by aligning them to a specific sequence in the cluster. """ #------------------ # Initialization. - #------------------ # List of the sequences elements that belong to the target cluster. alignment_to_keep_elements = self.involved_clusters_list[self.target_cluster_index].get_children() # List of the selected sequence in the target cluster. self.selected_sequences_in_target_alignment = [e for e in alignment_to_keep_elements if e.selected] # List of the selected sequences that have to be appended to the target cluster. self.elements_to_add = [] for e in self.selected_elements: if not e.is_cluster() and not e in alignment_to_keep_elements: self.elements_to_add.append(e) #---------------------------------------------------------------------------------------- # Perform a first alignment between all the selected sequences (belonging to the target - # cluster and the external ones). - #---------------------------------------------------------------------------------------- self.initial_alignment_name = "all_temporary" self.elements_to_align = self.selected_sequences_in_target_alignment[:]+self.elements_to_add[:] self.perform_regular_alignment(self.elements_to_align, output_file_name=self.initial_alignment_name) #------------------------------------------------------------------------------------------ # Actually joins all the alignments (performs a center star alignment in which the center - # star is the anchor sequence in the target cluster). - #------------------------------------------------------------------------------------------ # First builds the al_result.txt file with the target alignment. 
merged_alignment_output = "al_result" # align_output.txt self.pymod.build_sequence_file(alignment_to_keep_elements, merged_alignment_output, file_format="clustal", remove_indels=False, unique_indices_headers=True) # Builds the list of pairwise alignments. j_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, self.initial_alignment_name + ".aln"), "clustal")) c0_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, merged_alignment_output + ".aln"), "clustal")) if j_recs[0].id in [r.id for r in c0_recs]: anchor_seq = j_recs[0] external_seq = j_recs[1] elif j_recs[1].id in [r.id for r in c0_recs]: anchor_seq = j_recs[1] external_seq = j_recs[0] else: raise KeyError("External sequence header not found in the cluster.") anchor_seq_in_cluster = [r for r in c0_recs if r.id == anchor_seq.id][0] all_recs = [anchor_seq_in_cluster, external_seq] + [r for r in c0_recs if r.id != anchor_seq.id] aligned_pairs = [] for rec_i_id, rec_i in enumerate(all_recs): for rec_j in all_recs[rec_i_id:]: if rec_i.id == rec_j.id: continue if rec_i.id == anchor_seq.id: if rec_j.id == external_seq.id: aligned_pairs.append([str(anchor_seq.seq), str(external_seq.seq)]) else: aligned_pairs.append([str(rec_i.seq), str(rec_j.seq)]) else: # Build a new alignment in the 'build_cstar_alignment' method. aligned_pairs.append(None) # Joins the alignments. seqs = [str(s.seq).replace("-", "") for s in all_recs] all_ids = [str(s.id) for s in all_recs] save_cstar_alignment(seqs=seqs, all_ids=all_ids, pairwise_alis=aligned_pairs, output_filepath=os.path.join(self.pymod.alignments_dirpath, merged_alignment_output + ".aln")) #----------------------- # Prepares the output. - #----------------------- # Builds a list of the elements to update. self.build_elements_to_align_dict(alignment_to_keep_elements + self.elements_to_add) # Sets the name of the final alignment output file. 
self.protocol_output_file_name = merged_alignment_output # The temporary files needed to peform this alignment will be deleted at the end of the # alignment process. ################################### # Method to perform the # # "alignment joining" mode. # ################################### def perform_alignment_joining(self): #------------------------------------------------------------------------------ # Prepares alignment files containing the alignments which have to be joined. - #------------------------------------------------------------------------------ alignments_to_join_file_list = [] elements_to_update = [] for (i, cluster) in enumerate(self.involved_clusters_list): # Build the .fasta files with the alignments. file_name = "cluster_%s" % i children = cluster.get_children() self.pymod.build_sequence_file(children, file_name, file_format="clustal", remove_indels=False, unique_indices_headers=True) alignments_to_join_file_list.append(file_name) elements_to_update.extend(children) #------------------- # Get the bridges. - #------------------- self.elements_to_align = self.pymod.get_selected_sequences() # If the bridges are specified by the user. children_list = [e for e in self.elements_to_align if e.is_child()] mothers_list = [e for e in self.elements_to_align if e.is_root_sequence()] bridges_list = children_list[:] + mothers_list[:] elements_to_update.extend(mothers_list) #----------------------------------------------- # Performs an alignment between the "bridges". - #----------------------------------------------- bridges_alignment_name = "bridges_alignment" self.perform_regular_alignment(bridges_list, bridges_alignment_name) alignment_joining_output = "al_result" #-------------------------------------------------------------------------------- # Actually joins the alignments and produces a final .aln file with the result. - #-------------------------------------------------------------------------------- # Get alignment between the anchor sequences. 
j_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, bridges_alignment_name + ".aln"), "clustal")) j_recs_ids = [r.id for r in j_recs] clusters_alis = [] j_recs_ids_clusters_dict = {} for clust_id, alignment_file_name in enumerate(alignments_to_join_file_list): # Get all the records from a cluster. c_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, alignment_file_name + ".aln"), "clustal")) # Sorts the records of this cluster. cj_rec = [r for r in c_recs if r.id in j_recs_ids][0] c_recs = [cj_rec] + [r for r in c_recs if r.id != cj_rec.id] clusters_alis.append(c_recs) j_recs_ids_clusters_dict[clust_id] = [r for r in j_recs if r.id == cj_rec.id][0] for clust_id, c_recs in enumerate(clusters_alis): if clust_id + 1 == len(clusters_alis): break # Always use the bridge of the first alignment as an anchor. j_recs = [j_recs_ids_clusters_dict[0], j_recs_ids_clusters_dict[clust_id+1]] j_ids = [r.id for r in j_recs] j_seqs = [str(r.seq) for r in j_recs] # On the first iteration join the first and the second alignment. if clust_id == 0: msa_ids = [] msa_seqs = [] for clust in (c_recs, clusters_alis[clust_id+1]): msa_ids.extend([r.id for r in clust]) msa_seqs.append([str(r.seq) for r in clust]) # On successive iterations use the alignment built in the previous iteration. else: old_recs = list(SeqIO.parse(os.path.join(self.pymod.alignments_dirpath, alignment_joining_output + ".aln"), "clustal")) msa_ids = [r.id for r in old_recs] msa_ids.extend([r.id for r in clusters_alis[clust_id+1]]) msa_seqs = [[str(r.seq) for r in old_recs]] msa_seqs.append([str(r.seq) for r in clusters_alis[clust_id+1]]) # Actually joins the alignments. results = join_alignments(j_msa=j_seqs, msa_l=msa_seqs) # Saves the alignment. 
seq_records = [] for aliseq, rec_id in zip(results, msa_ids): seq_records.append(SeqRecord(Seq(str(aliseq)), id=rec_id)) SeqIO.write(seq_records, os.path.join(self.pymod.alignments_dirpath, alignment_joining_output + ".aln"), "clustal") #----------------------- # Prepares the output. - #----------------------- # Builds a list of the elements to update. self.build_elements_to_align_dict(elements_to_update) # Sets the name of the final alignment output file. self.protocol_output_file_name = alignment_joining_output # The temporary file will be deleted later at the end of the 'alignment_state' method. ################################################################# # Import the updated sequences in PyMod. # ################################################################# def create_alignment_element(self): """ A method to create a PyMod element for the alignment and to build a cluster to contain the aligned sequences. """ #------------------------- # Build a new alignment. - #------------------------- if self.alignment_mode == "build-new-alignment": # Gets the position in the list of PyMod elements where the new will will be displayed. lowest_index = min([self.pymod.get_pymod_element_index_in_root(e) for e in self.elements_to_align]) # Actually creates the new PyMod alignment element. self.alignment_element = self.pymod.add_new_cluster_to_pymod(cluster_type="alignment", # cluster_name=ali_name, child_elements=self.elements_to_align, algorithm=self.protocol_name, update_stars=True) # sorted(self.elements_to_align,key=lambda el: (el.mother_index,el.child_index)): # Moves the new element from the bottom of the list to its new position. self.pymod.change_pymod_element_list_index(self.alignment_element, lowest_index) #----------------------------- # Rebuilds an old alignment. 
- #----------------------------- elif self.alignment_mode == "rebuild-old-alignment": self.alignment_element = self.pymod.get_selected_clusters()[0] if self.alignment_element.cluster_type == "alignment": self.alignment_element.my_header = self.pymod.set_alignment_element_name(pymod_vars.algs_full_names_dict[self.protocol_name], self.alignment_element.cluster_id) elif self.alignment_element.cluster_type == "blast-search": self.alignment_element.my_header = self.updates_blast_search_element_name(self.alignment_element.my_header, pymod_vars.alignment_programs_full_names_dictionary[self.protocol_name]) self.update_alignment_element(self.alignment_element, new_algorithm=self.protocol_name) #--------------------------------------------------------- # Expand an already existing cluster with new sequences. - #--------------------------------------------------------- elif self.alignment_mode == "keep-previous-alignment": # Gets the target cluster element. self.alignment_element = self.involved_clusters_list[self.target_cluster_index] # Appends new sequences to the target cluster. for element in self.elements_to_add: self.alignment_element.add_child(element) # Updates the alignment element with new information about the new alignment. self.alignment_element.algorithm = "merged" # alignment_description = "merged with %s" % (pymod_vars.algs_full_names_dict[self.protocol_name]) alignment_description = "merged" self.alignment_element.my_header = self.pymod.set_alignment_element_name(alignment_description, self.alignment_element.cluster_id) #-------------------------------------- # Join two or more existing clusters. - #-------------------------------------- elif self.alignment_mode == "alignment-joining": # Find the right mother index in order to build the new cluster where one of the # original ones was placed. lowest_index = min([self.pymod.get_pymod_element_index_in_root(e) for e in self.elements_to_align]) # Move all the sequences in the new cluster. 
new_elements = [] bridges_list = [] # First appends the mothers (if any) to the new cluster. for e in self.selected_elements: if e.is_root_sequence(): new_elements.append(e) bridges_list.append(e) # Then appends the children. for cluster in self.involved_clusters_list: for c in cluster.get_children(): new_elements.append(c) if c.selected: bridges_list.append(c) # Orders them. new_elements = sorted(new_elements,key=lambda el: (self.pymod.get_pymod_element_index_in_root(el), self.pymod.get_pymod_element_index_in_container(el))) # Marks the bridges so that they are displayed with a "b" in their cluster. # for b in bridges_list: # # b.is_bridge = True # b.bridge = True alignment_description = "joined by using " + pymod_vars.algs_full_names_dict[self.protocol_name] # Builds the new "PyMod_element" object for the new alignment. # ali_name = "Joined " + self.pymod.set_alignment_element_name(alignment_description, self.pymod.alignment_count) self.alignment_element = self.pymod.add_new_cluster_to_pymod(cluster_type="alignment", # cluster_name=ali_name, child_elements=new_elements, algorithm=self.protocol_name, # +"-joined", update_stars=True) # sorted(self.elements_to_align,key=lambda el: (el.mother_index,el.child_index)): # Moves the new element from the bottom of the list to its new position. self.pymod.change_pymod_element_list_index(self.alignment_element, lowest_index) ################################################################################################### # Sequence alignments. # ################################################################################################### class Regular_sequence_alignment(Regular_alignment): def check_alignment_selection(self): """ Checks that there are at least two sequences that can be aligned in a "regular-alignment". """ # Checks that there are at least two sequences. 
correct_selection = False if len(self.selected_elements) > 1: correct_selection = True return correct_selection def selection_not_valid(self): """ Called to inform the user that there is not a right selection in order to perform an alignment. """ title = "Selection Error" message = "Please select two or more sequences for the alignment." self.pymod.main_window.show_error_message(title, message) ################################################################################################### # Structural alignments. # ################################################################################################### class Regular_structural_alignment(Regular_alignment): def check_alignment_selection(self): # And that only sequences with structures are selected. if False in [e.has_structure() for e in self.pymod.get_selected_sequences()]: self.invalid_selection_message = "Please select only elements with structures." return False # And that no nucleci acids are selected. if True in [e.polymer_type == "nucleic_acid" for e in self.pymod.get_selected_sequences()]: self.invalid_selection_message = "Can not perform a structural alignment with nucleic acids elements." return False # Checks that there are at least two selected elements. if len(self.selected_elements) < 2: self.invalid_selection_message = "Please select two or more structures." return False return True def selection_not_valid(self): title = "Structures Selection Error" self.pymod.main_window.show_error_message(title, self.invalid_selection_message) def get_options_from_gui(self): self.compute_rmsd_option = self.alignment_window.get_compute_rmsd_option_value() return True def compute_rmsd_dict(self, aligned_elements): """ Add information to build a root mean square deviation matrix. These RMSD will be computed only once, when the structural alignment first built. """ # Prepares a dictionary of coordinates for each aligned structure. 
Each element will have a # dictionary in which residues indices are the keys and arrays with coordinates are the values. # Coordinates are collected here, because the PyMOL 'get_coords' method is slow and calling # it only once per structure speeds up RMSD matrix calculation. all_coords_dict = {} for e in aligned_elements: residues, coords = self.get_coords_array(e, "ca", get_selectors=False) all_coords_dict[e] = dict([(r.db_index, c) for (r, c) in zip(residues, coords)]) # Actually computes the RMSD matrix. rmsd_dict = {} for i, ei in enumerate(aligned_elements): for j, ej in enumerate(aligned_elements): if j > i: rmsd = self.get_rmsd(ei, ej, all_coords_dict[ei], all_coords_dict[ej]) # This will fill "half" of the matrix. rmsd_dict.update({(ei.unique_index, ej.unique_index): rmsd}) # This will fill the rest of the matrix. rmsd_dict.update({(ej.unique_index, ei.unique_index): rmsd}) elif j == i: rmsd_dict.update({(ei.unique_index, ej.unique_index): None}) # 0.0}) return rmsd_dict def set_rmsd_dict(self): if self.use_rmsd_matrix(): self._rmsd_dict = self.compute_rmsd_dict(self.elements_to_align) else: self._rmsd_dict = None def update_additional_information(self): self.set_rmsd_dict() self.alignment_element.rmsd_dict = self._rmsd_dict def show_additional_alignment_output(self): if self.use_rmsd_matrix(): self.pymod.display_rmsd_matrix(self.alignment_element) def use_rmsd_matrix(self): return self.compute_rmsd_option and self.alignment_mode in ("build-new-alignment", "rebuild-old-alignment")
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_base_alignment/_gui.py
.py
12,392
265
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import sys import pymod_lib.pymod_vars as pmdt from pymol.Qt import QtWidgets, QtGui, QtCore from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt, PyMod_radioselect_qt) class Alignment_window_qt(PyMod_protocol_window_qt): def add_middle_frame_widgets(self): """ The middle frame of the window will contain a frame with widgets to choose the alignment mode and a frame with widgets to change the alignment algorithm parameters. """ self.build_alignment_mode_frame() self.build_algorithm_options_frame() def build_alignment_mode_frame(self): """ Builds a frame with some options to choose the alignment mode. """ # Vbox which will store all the widgets for the alignment mode options. self.alignment_mode_vbox = QtWidgets.QVBoxLayout() self.alignment_mode_label = QtWidgets.QLabel("Alignment Mode") self.alignment_mode_vbox.addWidget(self.alignment_mode_label) self.alignment_mode_button_group = QtWidgets.QButtonGroup() self.build_strategy_specific_modes_frames() # Defined in child classes. self.middle_formlayout.addRow(self.alignment_mode_vbox) def build_algorithm_options_frame(self): """ Options to choose the parameters of the alignment algoirthm being used. """ self.build_algorithm_options_widgets() def build_strategy_specific_modes_frames(self): """ Build components of the GUI to show the alignment options. 
""" pass def build_algorithm_options_widgets(self): pass def get_alignment_mode(self): for radiobutton in self.alignment_mode_button_group.buttons(): if radiobutton.isChecked(): return radiobutton._value raise ValueError("No alignment mode was selected.") ################################################################################################### # ALIGNMENT STRATEGIES. # ################################################################################################### class Regular_alignment_window_qt(Alignment_window_qt): """ Base class to build alignment windows for regular alignments. """ def build_strategy_specific_modes_frames(self): #---------------------------- # Rebuild an old alignment. - #---------------------------- if self.protocol.rebuild_single_alignment_choice: new_alignment_rb_text = "Rebuild alignment" new_alignment_rb_help = "Rebuild the alignment with all its sequences." self.new_alignment_radiobutton = QtWidgets.QRadioButton(new_alignment_rb_text) self.new_alignment_radiobutton.clicked.connect(self.click_on_build_new_alignment_radio) self.new_alignment_radiobutton._value = "rebuild-old-alignment" self.new_alignment_radiobutton.setChecked(True) self.alignment_mode_vbox.addWidget(self.new_alignment_radiobutton) self.alignment_mode_button_group.addButton(self.new_alignment_radiobutton) return None #------------------------------------------------------ # Build a new alignment using the selected sequences. - #------------------------------------------------------ new_alignment_rb_text = "Build a new alignment" new_alignment_rb_help = "Build a new alignment from scratch using the selected sequences." 
self.new_alignment_radiobutton = QtWidgets.QRadioButton(new_alignment_rb_text) self.new_alignment_radiobutton.clicked.connect(self.click_on_build_new_alignment_radio) self.new_alignment_radiobutton._value = "build-new-alignment" self.new_alignment_radiobutton.setChecked(True) self.alignment_mode_vbox.addWidget(self.new_alignment_radiobutton) self.alignment_mode_button_group.addButton(self.new_alignment_radiobutton) #-------------------- # Alignment joiner. - #-------------------- # This can be performed only if there is one selected child per cluster. if len(self.protocol.involved_clusters_list) > 1 and self.protocol.check_alignment_joining_selection(): # alignment_joiner_rb_text = "Join the alignments using the selected sequences as bridges (see 'Alignment Joining')." self.join_alignments_radiobutton = QtWidgets.QRadioButton("Join Alignments") self.join_alignments_radiobutton.clicked.connect(self.click_on_alignment_joiner_radio) self.join_alignments_radiobutton._value = "alignment-joining" self.alignment_mode_vbox.addWidget(self.join_alignments_radiobutton) self.alignment_mode_button_group.addButton(self.join_alignments_radiobutton) #--------------------------- # Keep previous alignment. - #--------------------------- # Right now it can be used only when the user has selected one sequence in a cluster # and one sequence outside a cluster. if (# Only one selected cluster. len(self.protocol.involved_clusters_list) == 1 and # Only one selected sequence in the selected cluster. self.protocol.pymod.check_only_one_selected_child_per_cluster(self.protocol.involved_clusters_list[0]) and # Only one selected sequence outside any cluster. 
len(self.protocol.selected_root_sequences_list) == 1): self.keep_previous_alignment_radiobutton = QtWidgets.QRadioButton("Keep previous alignment") self.keep_previous_alignment_radiobutton.clicked.connect(self.click_on_keep_previous_alignment_radio) self.keep_previous_alignment_radiobutton._value = "keep-previous-alignment" self.alignment_mode_vbox.addWidget(self.keep_previous_alignment_radiobutton) self.alignment_mode_button_group.addButton(self.keep_previous_alignment_radiobutton) def click_on_build_new_alignment_radio(self): pass def click_on_alignment_joiner_radio(self): pass def click_on_keep_previous_alignment_radio(self): pass class Profile_alignment_window_qt(Alignment_window_qt): """ Base class to build windows of profile alignment protocols. """ def build_strategy_specific_modes_frames(self): """ Build components of the GUI to show the alignment options. """ #------------------------------------------ # Perform a profile to profile alignment. - #------------------------------------------ if self.protocol.can_perform_ptp_alignment: # profile_profile_rb_text = "Profile to profile: perform a profile to profile alignment." profile_profile_rb_text = "Profile to profile" self.profile_to_profile_radiobutton = QtWidgets.QRadioButton(profile_profile_rb_text) self.profile_to_profile_radiobutton.clicked.connect(self.click_on_profile_to_profile_radio) self.profile_to_profile_radiobutton._value = "profile-to-profile" self.profile_to_profile_radiobutton.setChecked(True) self.alignment_mode_vbox.addWidget(self.profile_to_profile_radiobutton) self.alignment_mode_button_group.addButton(self.profile_to_profile_radiobutton) #----------------------------------------- # Perform sequence to profile alignment. - #----------------------------------------- sequence_profile_rb_text = None build_target_profile_frame = False # Shows a different label for the checkbutton if there is one or more clusters involved. 
if len(self.protocol.selected_clusters_list) > 1: # sequence_profile_rb_text = "Sequence to profile: align to a target profile the rest of the selected sequences." sequence_profile_rb_text = "Sequence to profile" build_target_profile_frame = True elif len(self.protocol.selected_clusters_list) == 1: profile_cluster_name = self.protocol.involved_clusters_list[0].my_header # sequence_profile_rb_text = "Sequence to profile: align the selected sequence to the target profile '%s'." % (profile_cluster_name) sequence_profile_rb_text = "Sequence to profile" # Radiobutton. self.sequence_to_profile_radiobutton = QtWidgets.QRadioButton(sequence_profile_rb_text) self.sequence_to_profile_radiobutton.clicked.connect(self.click_on_sequence_to_profile_radio) self.sequence_to_profile_radiobutton._value = "sequence-to-profile" if not self.protocol.can_perform_ptp_alignment: self.sequence_to_profile_radiobutton.setChecked(True) self.alignment_mode_vbox.addWidget(self.sequence_to_profile_radiobutton) self.alignment_mode_button_group.addButton(self.sequence_to_profile_radiobutton) # If there is more than one selected cluster, then build a frame to let the user choose # which is going to be the target profile. if build_target_profile_frame: # Frame with the options to choose which is going to be the target profile. self.target_profile_frame = QtWidgets.QFormLayout() self.alignment_mode_vbox.addLayout(self.target_profile_frame) # Label. self.target_alignment_label = QtWidgets.QLabel("Target profile:") self.target_alignment_label.setStyleSheet("margin-left: 35px") # Combobox. 
self.target_alignment_combobox = QtWidgets.QComboBox() for cluster in self.protocol.involved_clusters_list: self.target_alignment_combobox.addItem(cluster.my_header) self.target_alignment_combobox.setEditable(False) self.target_profile_frame.addRow(self.target_alignment_label, self.target_alignment_combobox) self.target_alignment_combobox.setFixedWidth(self.target_alignment_combobox.sizeHint().width()) def click_on_profile_to_profile_radio(self): if hasattr(self, "target_profile_frame"): self.target_alignment_combobox.hide() self.target_alignment_label.hide() def click_on_sequence_to_profile_radio(self): if self.protocol.can_perform_ptp_alignment: self.target_alignment_combobox.show() self.target_alignment_label.show() def get_selected_cluster_index(self): return self.target_alignment_combobox.currentIndex() def show(self): Alignment_window_qt.show(self) # If the profile to profile option is available, the 'target_profile_frame' will be # hidden until the user clicks on the "sequence_to_profile_radiobutton". This is # performed here, because the window has to resize correctly by considering also # the space occupied by the widgets in the 'target_profile_frame' (which will # not be considered if they are already hidden). if self.protocol.can_perform_ptp_alignment: self.target_alignment_combobox.hide() self.target_alignment_label.hide() ################################################################################################### # ALGORITHMS SPECIFIC CLASSES. # ################################################################################################### class Structural_alignment_base_window_qt: def build_rmsd_option(self): # Decide whether to compute the RMSD matrix if the structural alignment. 
self.compute_rmsd_rds = PyMod_radioselect_qt(label_text="Compute RMSD Matrix", buttons=('Yes', 'No')) self.compute_rmsd_rds.setvalue("Yes") self.middle_formlayout.add_widget_to_align(self.compute_rmsd_rds) def get_compute_rmsd_option_value(self): return pmdt.yesno_dict[self.compute_rmsd_rds.getvalue()]
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_base_alignment/_base_profile_alignment.py
.py
7,887
164
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Profile alignments. """ from pymod_lib import pymod_vars from pymod_lib.pymod_protocols.alignment_protocols._base_alignment import Alignment_protocol from pymod_lib.pymod_exceptions import catch_protocol_exception class Profile_alignment(Alignment_protocol): alignment_strategy = "profile-alignment" ################################################################# # Start the alignment process. # ################################################################# def check_alignment_selection(self): """ Checks if the selected elements can be used to perform a profile alignment. """ # This will be set to True if there is an adequate selection in order to align two profiles. self.can_perform_ptp_alignment = False # Checks if there is at least one cluster which is entirely selected. number_of_selected_clusters = len(self.selected_clusters_list) number_of_involved_clusters = len(self.involved_clusters_list) number_of_root_sequences = len(self.selected_root_sequences_list) # No clusters are involved. if number_of_selected_clusters == 0: return False # If there is only one selected cluster and if there is at least one selected sequence this # cluster, then a sequence to profile alignment can be performed. if number_of_involved_clusters == 1 and number_of_selected_clusters == 1 and number_of_root_sequences > 0: return True # Two involved clusters. elif number_of_involved_clusters == 2: # If there aren't any other selected sequences a profile to profile alignment can be # performed. if number_of_selected_clusters == 2 and number_of_root_sequences == 0: self.can_perform_ptp_alignment = True return True # Only sequence to profile alignments can be performed. 
elif number_of_involved_clusters >= 3: return True else: return False def selection_not_valid(self): title = "Selection Error" message = ("Please select at least one entire cluster and some other sequences" " in order to perform a profile alignment.") self.pymod.main_window.show_error_message(title, message) def check_sequences_level(self): self.clusters_are_involved = True return True ################################################################# # Perform the alignment. # ################################################################# def define_alignment_mode(self): """ Gets several parameters from the GUI in order to define the alignment mode. """ # It can be either "sequence-to-profile" or "profile-to-profile". self.alignment_mode = self.alignment_window.get_alignment_mode() # Takes the index of the target cluster. self.target_cluster_index = None # Takes the index of the target cluster for the "keep-previous-alignment" mode. if self.alignment_mode == "sequence-to-profile": # If there is only one cluster involved its index its going to be 0. if len(self.selected_clusters_list) == 1: self.target_cluster_index = 0 # Cluster index. # Get the index of the cluster from the combobox. elif len(self.selected_clusters_list) > 1: if hasattr(self.alignment_window.target_profile_frame, "get_selected_cluster_index"): self.target_cluster_index = self.alignment_window.target_profile_frame.get_selected_cluster_index() else: self.target_cluster_index = self.alignment_window.get_selected_cluster_index() @catch_protocol_exception def perform_alignment_protocol(self): if self.alignment_mode == "sequence-to-profile": self.perform_sequence_to_profile_alignment() elif self.alignment_mode == "profile-to-profile": self.perform_profile_to_profile_alignment() ###################################################### # Methods to perform sequence-to-profile alignments. 
# ###################################################### def perform_sequence_to_profile_alignment(self): self.run_sequence_to_profile_alignment_program() ##################################################### # Methods to perform profile-to-profile alignments. # ##################################################### def perform_profile_to_profile_alignment(self): self.run_profile_to_profile_alignment_program() ################################################################# # Import the updated sequences in PyMod. # ################################################################# def create_alignment_element(self): #--------------------------------------------------------- # Expand an already existing cluster with new sequences. - #--------------------------------------------------------- if self.alignment_mode == "sequence-to-profile": # Gets the target cluster element. self.alignment_element = self.involved_clusters_list[self.target_cluster_index] # Appends new sequences to the target cluster. for element in self.elements_to_add: self.alignment_element.add_child(element) # Updates the alignment element with new information about the new alignment. self.alignment_element.algorithm = "merged" # alignment_description = "merged with %s" % (pymod_vars.algs_full_names_dict[self.protocol_name]) alignment_description = "merged" self.alignment_element.my_header = self.pymod.set_alignment_element_name(alignment_description, self.alignment_element.cluster_id) #-------------------------------------- # Join two or more existing clusters. - #-------------------------------------- elif self.alignment_mode == "profile-to-profile": # Find the right mother index in order to build the new cluster where one of the # original ones was placed. lowest_index = min([self.pymod.get_pymod_element_index_in_root(e) for e in self.elements_to_align]) # Orders them. 
self.elements_to_align = sorted(self.elements_to_align, key=lambda el: (self.pymod.get_pymod_element_index_in_root(el), self.pymod.get_pymod_element_index_in_container(el))) alignment_description = "joined by using " + pymod_vars.algs_full_names_dict[self.protocol_name] # ali_name = "Joined " + self.pymod.set_alignment_element_name(alignment_description, self.pymod.alignment_count) # Builds the new "PyMod_element" object for the new alignment. new_cluster = self.pymod.add_new_cluster_to_pymod(cluster_type="alignment", # cluster_name=ali_name, child_elements=self.elements_to_align, algorithm=self.protocol_name, update_stars=True) # Moves the new element from the bottom of the list to its new position. self.pymod.change_pymod_element_list_index(new_cluster, lowest_index)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/alignment_protocols/_base_alignment/__init__.py
.py
15,770
361
# Copyright 2016 by Chengxin Zhang, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import shutil from Bio import SeqIO from pymod_lib import pymod_vars from pymod_lib.pymod_seq.seq_manipulation import compute_sequence_identity from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_threading import Protocol_exec_dialog from pymod_lib.pymod_exceptions import catch_protocol_exception class Alignment_protocol(PyMod_protocol): """ A base class for alignment protocols. """ ################################################################# # Step 1/3 for performing an alignment from the main menu. # # Methods to launch an alignment program and check if it can be # # used (for example, if it is installed on the user's machine). # ################################################################# def launch_from_gui(self): if self.alignment_program_exists(): self.initialize_alignment() else: self.alignment_program_not_found() def alignment_program_exists(self): """ Returns 'True' if the program full path was specified in the PyMod Options window. """ return self.tool.tool_file_exists() def alignment_program_not_found(self): """ Displays an error message that tells the user that some program was not found. """ self.tool.tool_file_not_found() ################################################################# # Step 2/3 for performing an alignment from the main menu. # # Methods to check if the user built a correct selection in # # order to perform an alignment and the start the alignment. 
# ################################################################# def initialize_alignment(self): """ This method will check if there is a correct selection in order to perform an alignment, and it will create a window with the alignment options if necessary. """ # A list of all kind of elements (both sequences, alignment and blast-search) that were # selected by the user. This is going to be used in other methods too, later in the Pymod # alignment process. self.selected_elements = [] # If among the selected sequences there are some leader sequences of some collapsed cluster, # ask users if they want to include their hidden siblings in the alignment. self.extend_selection_to_hidden_children() # Builds a list of the selected elements. self.selected_elements = self.pymod.get_selected_elements() # This will build a series of lists containing informations about which cluster was selected # by the user. self.build_cluster_lists() # Check if there are some sequences with an associated structure involved. self.structures_are_selected = True in [e.has_structure() for e in self.selected_elements] # First check if the selection is correct. if not self.check_alignment_selection(): self.selection_not_valid() return None # Ask if the user wants to proceed with rebuild-entire-old-alignment or extract-siblings if # needed. if not self.check_sequences_level(): return None # Attribute to store a dictionary representing a RMSD matrix of a structural alignment. self._rmsd_dict = None # Programs that need a window to display their options. self.show_options_window() ################################################################# # Structure of the windows showed when performing an alignment. # ################################################################# def show_options_window(self): """ This method builds the structure of the alignment options window. 
""" Alignment_window_class_qt = self.get_alignment_window_class_qt() self.alignment_window = Alignment_window_class_qt(self.pymod.main_window, self, title=" %s Options " % (pymod_vars.algs_full_names_dict[self.protocol_name]), upper_frame_title="Here you can modify options for %s" % (pymod_vars.algs_full_names_dict[self.protocol_name]), submit_command=self.alignment_state) self.alignment_window.show() ################################################################# # Step 3/3 for performing an alignment from the main menu. # # Methods to launch an alignment and to update the sequences # # loaded in PyMod once the alignment is complete. # ################################################################# @catch_protocol_exception def alignment_state(self): """ This method is called either by the "start_alignment()" method or when the 'SUBMIT' button in some alignment window is pressed. It will first define the alignment mode according to the choices made by the user. Then, depending on the alignment strategy and the alignment mode, it will execute all the steps necessary to perform the alignment. """ # Gets the parameters from the GUI in order to chose the kind of alignment to perform. self.define_alignment_mode() if not self.get_options_from_gui(): return None try: self.alignment_window.destroy() except: pass # This list is going to be used inside in other methods of this class needed to perform the # alignment. self.elements_to_align = [] self.elements_to_align_dict = {} self.protocol_output_file_name = None #----------------------------------- # Actually performs the alignment. - #----------------------------------- if not self.pymod.use_protocol_threads: self.perform_alignment_protocol() else: label_text = ("Running %s. Please wait for the process to" " complete..." % pymod_vars.algs_full_names_dict[self.protocol_name]) lock_dialog = self.protocol_name.startswith("salign") # A MODELLER thread can no be exited safely. 
p_dialog = Protocol_exec_dialog(app=self.pymod.main_window, pymod=self.pymod, function=self.perform_alignment_protocol, args=(), wait_start=0.4, wait_end=0.4, lock=lock_dialog, stdout_silence=lock_dialog, title="Running %s" % pymod_vars.algs_full_names_dict[self.protocol_name], label_text=label_text) p_dialog.exec_() #---------------------------------------------------------------------- # Updates the PyMod elements just aligned and completes the protocol. - #---------------------------------------------------------------------- self.create_alignment_element() self.update_aligned_elements() self.finish_alignment() self.show_additional_alignment_output() def get_options_from_gui(self): """ A protocol-specific method. """ return True def build_elements_to_align_dict(self, elements_to_align): for element in elements_to_align: self.elements_to_align_dict.update({element.get_unique_index_header(): element}) def update_alignment_element(self, alignment_element, new_algorithm=None): if new_algorithm: alignment_element.algorithm = new_algorithm def update_aligned_elements(self): """ Called when an alignment is performed. It updates the sequences with the indels obtained in the alignment. And also deletes the temporary files used to align the sequences. """ self.update_aligned_sequences() # Performs additional operations on the aligned sequences. self.perform_additional_sequence_editing() # Alignment objects built using different algorithms, store different additional data. self.update_additional_information() self.pymod.main_window.gridder(clear_selection=True, update_clusters=True, update_menus=True, update_elements=True) def update_aligned_sequences(self): self.update_aligned_sequences_with_modres() def update_aligned_sequences_with_modres(self): """ Used when the aligned sequences in the output file already have modres. """ # Gets from an alignment file the sequences with their indels produced in the alignment. 
ouput_handle = open(os.path.join(self.pymod.alignments_dirpath, self.protocol_output_file_name + ".aln"), "r") records = list(SeqIO.parse(ouput_handle, "clustal")) ouput_handle.close() # Updates the sequences. for a, r in enumerate(records): element_to_update = self.elements_to_align_dict[str(r.id)] self.update_single_element_sequence(element_to_update, r.seq) def update_aligned_sequences_inserting_modres(self, replace_modres_symbol=None): """ When importing alignments built by programs that remove the modified residues (X symbols) from the sequences, this method will reinsert them in the sequences. """ # Gets the aligned sequences from an alignment file. input_handle = open(os.path.join(self.pymod.alignments_dirpath, self.protocol_output_file_name + ".aln"), "r") records = list(SeqIO.parse(input_handle, "clustal")) input_handle.close() # Aligns the full sequences (with 'X' characters) to the sequence without 'X' characters. elements_to_update = [self.elements_to_align_dict[str(r.id)] for r in records] residues_to_insert_dict = {} elements_seqlist_dict = {} for e, r in zip(elements_to_update, records): new_seq, old_seq = e.trackback_sequence(r.seq) # Gets the list of indices where 'X' were inserted. for i, (rn, ro) in enumerate(zip(new_seq, old_seq)): if rn == "X" and ro == "-": if i in residues_to_insert_dict: residues_to_insert_dict[i].append(e) else: # residues_to_insert_dict.update({i: [e]}) residues_to_insert_dict[i] = [e] # Builds lists from sequences. elements_seqlist_dict.update({e: list(e.my_sequence)}) # For each inserted 'X' in a sequence, insert gaps in other sequences. inserted_res_count = 0 for res_id in sorted(residues_to_insert_dict.keys()): inserted = False for e in elements_to_update: if not e in residues_to_insert_dict[res_id]: elements_seqlist_dict[e].insert(res_id+inserted_res_count, "-") inserted = True if inserted: inserted_res_count += 1 # Actually updates the sequences. 
for e in elements_to_update: e.set_sequence("".join(elements_seqlist_dict[e])) def update_single_element_sequence(self, element_to_update, new_sequence): element_to_update.set_sequence(str(new_sequence)) def perform_additional_sequence_editing(self): """ This method will be overidden in children classes. """ pass def update_additional_information(self): """ This method will be overidden in children classes. """ pass ################################################################# # Finish the alignment. # ################################################################# def remove_alignment_temp_files(self): """ Used to remove the temporary files produced while performing an alignment. """ def check_file_to_keep(file_basename): file_name = os.path.splitext(file_basename)[0] # The only files that are not going to be deleted are guide tree or tree files generated # from an alignment. They will be kept in order to be accessed by users who wants to # inspect the trees. Their names are going to be built like the following: # (self.alignments_files_name) + (alignment_id) + ("_guide_tree" or "_align_tree") # resulting in: # alignment_n_guide_tree.dnd or alignment_n_guide_tree.dnd if file_name.startswith(self.pymod.alignments_files_names) and (file_name.endswith("guide_tree") or file_name.endswith("align_tree") or file_name.endswith("dendrogram")): return False else: return True files_to_remove = [file_basename for file_basename in os.listdir(self.pymod.alignments_dirpath) if check_file_to_keep(file_basename)] for file_basename in files_to_remove: file_path_to_remove = os.path.join(self.pymod.alignments_dirpath,file_basename) os.remove(file_path_to_remove) def finish_alignment(self): pass def show_additional_alignment_output(self): pass ################################################################## # Common methods used to execute alignments in several # # protocols. 
# ################################################################## def generate_highest_identity_pairs_list(self, initial_alignment_name): """ For each sequence to add to the alignment, finds the nearest selected sequence (in terms of sequence identity) of the target cluster according to the information of previous multiple alignment between all the sequences. """ # Reads the output file of the alignment and stores in a variable a list of its biopython # record objects. initial_alignment_file = open(os.path.join(self.pymod.alignments_dirpath, initial_alignment_name + ".aln"), "r") initial_alignment_records = list(SeqIO.parse(initial_alignment_file, "clustal")) initial_alignment_file.close() # A list that is going to contain as many rows as the sequence to add to the alignment and # as many columns as the selected sequences in target alignment. pair_list=[] index = 0 # Parses the records in the fasta file with the initial alignment just generated. for element in initial_alignment_records: for sequence in self.elements_to_add: # If the sequence in the list is the same of some element in the records. if element.id == sequence.get_unique_index_header(): pair_list.append([]) # Parses the list for sequences fo the alignment to keep. for structure in initial_alignment_records: for struct in self.selected_sequences_in_target_alignment: if structure.id == struct.get_unique_index_header(): identity = compute_sequence_identity(element.seq, structure.seq) pair_list[index].append(identity) index += 1 return pair_list
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/domain_analysis_protocols/split_domains.py
.py
6,058
139
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Protocol to split a sequence in a series of subsequences, each representing a domain. """ import os from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_element_feature import Domain_feature from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt, PyMod_entryfield_qt, PyMod_radioselect_qt, askyesno_qt) class Split_into_domains_protocol(PyMod_protocol): protocol_name = "Domain Split" def __init__(self, pymod, pymod_element, output_directory=None): PyMod_protocol.__init__(self, pymod, output_directory) self.pymod_element = pymod_element def launch_from_gui(self): """ Launches the domain splitting procedure from the PyMod GUI. """ # If the sequence has already some derived domains, ask the users whether to repeat the # splitting operation. if self.pymod_element.derived_domains_list: confirmation = askyesno_qt("Confirm", "Do you want to overwrite the previous splitting operation?", parent=self.pymod.get_qt_parent()) if not confirmation: return None # Show the options window. self.split_seq_offset_window = SplitSeqOffsetWindow_qt(self.pymod.main_window, protocol=self, title="Choose the Domain Offset", upper_frame_title="Choose how many flanking residues\nto append to each domain", submit_command=self.split_seq_offset_window_state) self.split_seq_offset_window.show() def split_seq_offset_window_state(self): """ Actually split the query sequence in its domains and load the corresponing elements in PyMod. """ # Gets the number of flanking residues from the GUI. 
try: n_c_term_offset = int(self.split_seq_offset_window.offset_1_enf.getvalue(validate=True)) except Exception as e: self.pymod.main_window.show_error_message("Input Error", str(e)) return None # Delete previous domains. if self.pymod_element.derived_domains_list: for el in self.pymod_element.derived_domains_list[:]: try: el.delete() except ValueError: # If the user has already deleted it. pass self.pymod_element.clear_derived_domains() self.pymod.main_window.gridder(update_clusters=True, update_menus=True) # Build the PyMod elements derived from the split domains. self.pymod_element.clear_derived_domains() for domain_idx, domain in enumerate(self.pymod_element.get_domains_features()): # Build a new PyMod element for each domain. ungapped = self.pymod_element.my_sequence.replace('-', '') new_startindex = max(0, domain.start-n_c_term_offset) new_endindex = min(len(ungapped), domain.end+n_c_term_offset)+1 new_seq = ungapped[new_startindex:new_endindex] new_name = "domain_%s_%s_%s" % (domain_idx+1, self.pymod_element.domain_analysis_id+1, domain.full_name) # TODO. my_el = self.pymod.build_pymod_element_from_args(new_name, new_seq) # Take the new element representing the split domain and add to it a copy of its # corresponding domain feature. domain_info = domain.get_feature_dict() domain_info.update({"is_derived": True, "offset": (n_c_term_offset, n_c_term_offset)}) domcopy = Domain_feature(**domain_info) my_el.add_domain_feature(domcopy) self.pymod_element.derived_domains_list.append(my_el) # Loads in PyMod. self.pymod.add_element_to_pymod(my_el) # Colors the new sequences (the 'gridder' method will be called in the method below). self.pymod.main_window.color_selection("multiple", self.pymod_element.derived_domains_list, "domains") # Finishes the process. 
self.evaluate_splitting() self.split_seq_offset_window.destroy() def evaluate_splitting(self): self.pymod.deselect_all_sequences() self.pymod.main_window.gridder(update_clusters=True, update_menus=True) class SplitSeqOffsetWindow_qt(PyMod_protocol_window_qt): """ Window for the header entry command 'Split Sequence Into Domains'. User select the N-term offset anc the C-term offset (in residues) to be left before the beginning and after the end of the domain itself. """ default_offset = 10 # self.geometry("400x250") def build_protocol_middle_frame(self): # Entryfield for offset selection. self.offset_1_enf = PyMod_entryfield_qt(label_text="N-Term and C-Term offset", # label_text = "N-Term offset", value=str(self.default_offset), validate={'validator': 'integer', 'min': 0, 'max': 1000}) self.middle_formlayout.add_widget_to_align(self.offset_1_enf) # self.offset_2_enf = PyMod_entryfield_qt(label_text="C-Term offset", # label_text = "N-Term offset", # value=str(self.default_offset), # validate={'validator': 'integer', # 'min': 0, 'max': 1000}) # self.middle_formlayout.add_widget_to_align(self.offset_2_enf) self.middle_formlayout.set_input_widgets_width(140)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/domain_analysis_protocols/__init__.py
.py
0
0
null
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/domain_analysis_protocols/domain_analysis.py
.py
12,699
276
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Protocols to search and assign domains to a protein sequence loaded in PyMod. """ import os from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_protocols.domain_analysis_protocols.hmmscan import Hmmscan_protocol from pymod_lib.pymod_protocols.domain_analysis_protocols.split_domains import Split_into_domains_protocol from pymod_lib.pymod_os_specific import clean_file_name from pymod_lib.pymod_seq.seq_star_alignment import join_alignments from pymod_lib.pymod_gui.shared_gui_components_qt import askyesno_qt class Domain_Analysis_Protocol(PyMod_protocol): """ This class will launch a 'Hmmscan_protocol' protocol used to identify the domains of a protein query. It will then store all the information useful to perform the operations flow of the domain analysis such as splitting and fuse. """ protocol_name = "Domain Analysis" def __init__(self, pymod, domain_search_mode): """ The 'domain_search_mode' can be: - "local" (a local hmmscan executable will be used to search on local databases) - "remote" (a remote hmmscan search will be performed on the EBI servers). """ if not domain_search_mode in ("local", "remote"): raise KeyError("Unknown 'domain_search_mode': %s" % domain_search_mode) PyMod_protocol.__init__(self, pymod, output_directory=pymod.domain_analysis_dirpath) self.domain_search_mode = domain_search_mode def launch_from_gui(self): # Check for only one selected sequence. selected_elements = self.pymod.get_selected_sequences() if len(selected_elements) != 1: title = "Selection Error" message = "Please select one and only one sequence to perform a domain search." 
self.pymod.main_window.show_error_message(title, message) return None self.pymod_element = selected_elements[0] # An index to distinguish each domain analysis performed in PyMod. self._index_of_domain_protocol = self.pymod.active_domain_analysis_count # Sets the name which will be used to label the query sequence in the domain analysis. self.query_element_name = "hmmscan_search_%s_%s" % (self.pymod_element.unique_index, self._index_of_domain_protocol) # Saves a FASTA file with the query sequence. self._pymod_element_seq_filepath = os.path.join(self.output_directory, self.query_element_name + '.fasta') self.pymod.build_sequence_file([self.pymod_element], self._pymod_element_seq_filepath, file_format="fasta", remove_indels=True, use_structural_information=False, add_extension=False, unique_indices_headers=False) # This will be storing instances of the various protocol classes for domain analysis. self.search_protocol = None # Launches an hmmscan search. self.run_hmmscan_search() ########################################################################### # Search domains with hmmscan. # ########################################################################### def run_hmmscan_search(self): # Reinitializes the domains list of the query element before searching again. if self.pymod_element.get_domains_features(): message = ("Would you like to perform a new domain search operation on this protein?" " Its previous domain search results will be lost.") confirmation = askyesno_qt("Confirm Domain Search", message, parent=self.pymod.get_qt_parent()) if not confirmation: return None self.pymod_element.clear_domains() self.pymod.main_window.gridder(update_clusters=True, update_menus=True) # Initializes the domain search protocol. self.search_protocol = Hmmscan_protocol(self.pymod, output_directory=self.output_directory, father_protocol=self) self.search_protocol.launch_from_gui() def evaluate_domain_search(self): # Updates PyMod data on domain searches. 
self.pymod_element.domain_analysis_id = self.pymod.active_domain_analysis_count self.pymod.active_domain_analysis_count += 1 # Updates the menus. self.pymod.main_window.gridder(update_menus=True) ################################################################################################### # Fuse split domains. # ################################################################################################### class Fuse_domains_protocol(PyMod_protocol): """ Protocol used to join in a single alignment the fragments derived from using a 'Split_into_domains_protocol' to split a query protein in its domains. """ protocol_name = "Domain Fuse" def __init__(self, pymod, pymod_element, output_directory=None): PyMod_protocol.__init__(self, pymod, output_directory) self.pymod_element = pymod_element def launch_from_gui(self): if not self.check_fuse_conditions(): return None self.run_fuse_protocol() def check_fuse_conditions(self): fuse_error_title = "Fuse Error" fuse_error_message = "Can not perform the Fuse operation." # Checks if all the derived domains are currently present in PyMod (that is, that no one # got deleted or altered). all_pymod_sequences = self.pymod.get_all_sequences() for domain_element in self.pymod_element.derived_domains_list: if not domain_element in all_pymod_sequences: message = "The '%s' sequence has been deleted. %s" % (domain_element.my_header, fuse_error_message) self.pymod.main_window.show_error_message(fuse_error_title, message) return False # Checks whether all the derived domains are currently aligned to other sequences. for domain_element in self.pymod_element.derived_domains_list: if domain_element.splitted_domain == None: message = "The sequence of the domain element '%s' has been edited, it will not be possible to map it to the original query sequence. 
%s" % (domain_element.my_header, fuse_error_message) self.pymod.main_window.show_info_message(fuse_error_title, message) return False if not domain_element.is_child(): message = "Not all the domains derived from element '%s' are currently aligned to other sequences. %s" % (self.pymod_element.my_header, fuse_error_message) self.pymod.main_window.show_info_message(fuse_error_title, message) return False other_domain_elements = [d_el for d_el in self.pymod_element.derived_domains_list if not d_el is domain_element] if len(set(other_domain_elements) & set(domain_element.get_siblings())) != 0: message = "Multiple domain sequences derived from the original query element '%s' can not be present in the same alignment. %s" % (self.pymod_element.my_header, fuse_error_message) self.pymod.main_window.show_info_message(fuse_error_title, message) return False return True def run_fuse_protocol(self, verbose=False): if verbose: print('\n\n__________FUSE___________\n') #----------------------------------------------------------------- # Builds multiple sequence alignments for each cluster involved. - #----------------------------------------------------------------- query_seq_gapless = self.pymod_element.my_sequence.replace("-", "") # This will contain a list of multiple sequence alignments (each represented by a list of # aligned sequences). msa_l = [] # This will contain sequences. msa_elements_list = [] # This will contain the PyMod elements to which the sequences above belong to. # Adds the sequences of the domains and their siblings. for domain_i, domain_el in enumerate(self.pymod_element.derived_domains_list): # For each domain, prepare its multiple sequence alignment. domain_msa = [domain_el.my_sequence] domain_elements_msa = [domain_el] for sibling in domain_el.get_siblings(): domain_msa.append(sibling.my_sequence) domain_elements_msa.append(sibling) # In the first iteration, adds as a first MSA the MSA of the original full length query. 
if domain_i == 0: # Prepares the first multiple sequence alignment (the one for the full length query). query_msa = [self.pymod_element.my_sequence] query_elements_msa = [self.pymod_element] if self.pymod_element.is_child(): for sibling in self.pymod_element.get_siblings(): query_msa.append(sibling.my_sequence) query_elements_msa.append(sibling) msa_l = [query_msa] msa_elements_l = [query_elements_msa] # In the successive iterations, adds as a first MSA the MSA produced in the previous # iteration. else: msa_l = [new_msa] _msa_elements_l = [] for msa_i in msa_elements_l: for seq_j in msa_i: _msa_elements_l.append(seq_j) msa_elements_l = [_msa_elements_l] msa_l.append(domain_msa) msa_elements_l.append(domain_elements_msa) # Prepares the "reference" alignment (an alignment containing the two anchor sequences # of the two alignments which will be joined). j_msa = [query_seq_gapless] domain_seq_gapless = domain_el.my_sequence.replace("-", "") domain_info = domain_el.splitted_domain domain_frag_start = max((0, domain_info.parent_start - domain_info.offset[0])) domain_frag_end = domain_info.parent_end + domain_info.offset[0] domain_seq_gapped = "-"*domain_frag_start + domain_seq_gapless domain_seq_gapped += "-"*(max(0, len(query_seq_gapless)-len(domain_seq_gapped))) j_msa.append(domain_seq_gapped) # Progressively join the alignments. new_msa = join_alignments(j_msa, msa_l) if verbose: print("\n# Temporary MSA %s" % domain_i) for seq in new_msa: print(seq) #------------------- # Get the results. - #------------------- elements_to_add = [] for domain_el in self.pymod_element.derived_domains_list: elements_to_add.append(domain_el) elements_to_add.extend(domain_el.get_siblings()) # Adds the domains and their siblings to the full length query alignment. if self.pymod_element.is_child(): self.pymod_element.mother.add_children(elements_to_add) # Builds a new alignment object. 
else: query_original_index = self.pymod.get_pymod_element_index_in_container(self.pymod_element) new_blast_cluster = self.pymod.add_new_cluster_to_pymod( cluster_type="alignment", child_elements=[self.pymod_element] + elements_to_add, algorithm="joined", update_stars=False) # Move the new cluster to the same position of the original query element in PyMod main # window. self.pymod.change_pymod_element_list_index(new_blast_cluster, query_original_index) # Updates the sequence of the elements involved. new_msa_elements = [] for msa_i in msa_elements_l: for seq_j in msa_i: new_msa_elements.append(seq_j) for element, updated_seq in zip(new_msa_elements, new_msa): element.set_sequence(updated_seq) # Shows the results in the main window. self.pymod.main_window.gridder(clear_selection=True, update_clusters=True, update_elements=True)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/domain_analysis_protocols/hmmscan/_gui.py
.py
19,582
439
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ GUI for performing hmmscan searches in PyMod. """ from pymol.Qt import QtWidgets, QtCore, QtGui from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt, PyMod_entryfield_qt, PyMod_radioselect_qt, active_entry_style, inactive_entry_style, small_font_style, highlight_color) from pymod_lib.pymod_vars import domain_colors_ordered, convert_rgb_to_hex ################################################################################################### # Hmmscan options window. # ################################################################################################### class Hmmscan_options_window_qt(PyMod_protocol_window_qt): def build_protocol_middle_frame(self): # Add the buttons to choose the database in which to search for domain profiles. if self.protocol.father_protocol.domain_search_mode == 'remote': self.hmmer_database_rds = PyMod_radioselect_qt(label_text="Database Selection", buttons=("PFAM", "Gene3D")) for button in self.hmmer_database_rds.get_buttons(): button.clicked.connect(self.database_opt_cmd) elif self.protocol.father_protocol.domain_search_mode == 'local': # Build the list of database names. self.protocol.hmmscan_db_dict = {} # This dictionary associates a database code (displayed in the GUI) to its filename. db_list = [] for db_filename in self.protocol.hmmscan_db_list: db_name = "".join(db_filename.split(".")[0:-2]) db_list.append(db_name) self.protocol.hmmscan_db_dict[db_name] = db_filename # Packs the PHMMER database selection widget. self.hmmer_database_rds = PyMod_radioselect_qt(label_text="Database Selection", buttons=db_list) self.middle_formlayout.add_widget_to_align(self.hmmer_database_rds) # E-value selection. 
self.e_value_threshold_enf = PyMod_entryfield_qt(label_text="E-value Threshold", value="1.0", validate={'validator': 'real', 'min': 0.0, 'max': 1000.0}) self.middle_formlayout.add_widget_to_align(self.e_value_threshold_enf) # Note about the Gene3D and Evalues. if self.protocol.father_protocol.domain_search_mode == 'remote': info_note = ('Note: The Gene3D online database will\n' 'ignore custom cut-off parameters since\n' 'they use a post processing step that\n' 'involves preset thresholds.') self.notelabel = QtWidgets.QLabel(info_note) self.middle_formlayout.addRow(self.notelabel) self.middle_formlayout.set_input_widgets_width(140) def database_opt_cmd(self): if self.hmmer_database_rds.getvalue() == 'Gene3D': self.e_value_threshold_enf.entry.setStyleSheet(inactive_entry_style) self.e_value_threshold_enf.entry.setEnabled(False) else: self.e_value_threshold_enf.entry.setStyleSheet(active_entry_style) self.e_value_threshold_enf.entry.setEnabled(True) ################################################################################################### # Hmmscan results window. # ################################################################################################### # results_header_options = {'background': 'black', 'fg': 'red', 'height': 1, 'padx': 10, 'pady': 10, 'font': "comic 12"} # results_row_options = {'background': 'black', 'fg': 'white', 'height': 1, 'highlightbackground': 'black', 'font': "comic 11"} class Hmmscan_results_window_qt(QtWidgets.QMainWindow): """ Window for showing similarity searches results. """ is_pymod_window = True def __init__(self, parent, protocol): super(Hmmscan_results_window_qt, self).__init__(parent) self.protocol = protocol self.query_len = len(self.protocol.query_element.my_sequence.replace('-', '')) ######################### # Configure the window. # ######################### self.setWindowTitle("HMMSCAN Results") # Sets the central widget. 
self.central_widget = QtWidgets.QWidget() self.setCentralWidget(self.central_widget) # The window has a main vbox layout. self.main_vbox = QtWidgets.QVBoxLayout() # Parameters used to draw the 'QGraphicsView' widgets for showing domains. self.preferred_size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) self.view_bg_color = "transparent" self.full_seq_pen = QtGui.QPen(QtGui.QColor(0, 0, 0, 0), 2) self.full_seq_color = "#7f7f7f" qcolor = QtGui.QColor(0, 0, 0) qcolor.setNamedColor(self.full_seq_color) self.full_seq_brush = QtGui.QBrush(qcolor) self.font_qcolor = QtGui.QColor(220, 220, 220, 255) self.font_size = 7 ################ # Upper frame. # ################ self.upper_frame = QtWidgets.QFrame() self.upper_frame_layout = QtWidgets.QGridLayout() self.upper_frame.setLayout(self.upper_frame_layout) self.main_vbox.addWidget(self.upper_frame) if 'query_descr' in self.protocol.parsed_res[0] and self.protocol.parsed_res[0]['query_descr']: labelseq = self.protocol.query_element.my_header # + '\n' + querydescr else: try: if len(self.protocol.query_element.description) > 79: labelseq = self.protocol.query_element.description[:78] + '...' else: labelseq = self.protocol.query_element.description except TypeError: labelseq = self.protocol.query_element.my_header self.upper_frame_title = QtWidgets.QLabel("HMMSCAN search results for " + labelseq) self.upper_frame_layout.addWidget(self.upper_frame_title) #------------------------- # Domain graphics frame. - #------------------------- # Builds the scene where to draw the domain representations. 
self.canvas_plot_scene = QtWidgets.QGraphicsScene() self.canvas_plot_view = QtWidgets.QGraphicsView(self.canvas_plot_scene) self.canvas_plot_view.setFixedHeight(120) self.canvas_plot_view.setSizePolicy(self.preferred_size_policy) self.canvas_plot_view.setStyleSheet("background: %s" % self.view_bg_color) self.upper_frame_layout.addWidget(self.canvas_plot_view) # Draw a rectangle with the full sequence. self.x_init = 10 y_init = 95 # 95 self.domain_y_init = y_init-7 self.full_seq_rect_w = 800 full_seq_rect_h = 10 self.canvas_plot_scene.addRect(self.x_init, y_init, self.full_seq_rect_w, full_seq_rect_h, self.full_seq_pen, self.full_seq_brush) # Draw the labels for the N- and C-terminal residues. text_offset_y = 15 text_offset_x = 10 text_n = self.canvas_plot_scene.addText("1") text_n.setPos(self.x_init-text_offset_x, y_init+text_offset_y) text_n.setDefaultTextColor(self.font_qcolor) text_n.setFont(QtGui.QFont(text_n.font().family(), self.font_size)) c_label = str(self.query_len) text_c = self.canvas_plot_scene.addText(c_label) text_offset_x_add = 5 if len(c_label) > 2: text_offset_x_add = 10 text_c.setPos(self.x_init+self.full_seq_rect_w-text_offset_x-text_offset_x_add, y_init+text_offset_y) text_c.setDefaultTextColor(self.font_qcolor) text_c.setFont(QtGui.QFont(text_c.font().family(), self.font_size)) ################# # Middle frame. # ################# # Scroll area which contains the widgets, set as the centralWidget. self.middle_scroll = QtWidgets.QScrollArea() self.main_vbox.addWidget(self.middle_scroll) # Widget that contains the collection of Vertical Box. self.middle_widget = QtWidgets.QWidget() # Scroll area properties. self.middle_scroll.setWidgetResizable(True) self.middle_scroll.setWidget(self.middle_widget) # QFormLayout in the middle frame. self.middle_formlayout = QtWidgets.QFormLayout() self.middle_widget.setLayout(self.middle_formlayout) #----------------- # Results frame. - #----------------- # Set the frame and its layout. 
self.results_frame = QtWidgets.QFrame() self.middle_formlayout.addRow(self.results_frame) self.results_grid = QtWidgets.QGridLayout() self.results_frame.setLayout(self.results_grid) # Calls a method which actually displays the similarity searches results. self.display_hmmscan_hits() # Align the gridded widgets to the left. self.results_grid.setAlignment(QtCore.Qt.AlignLeft) self.results_grid.setHorizontalSpacing(30) ################# # Bottom frame. # ################# self.main_button = QtWidgets.QPushButton("Submit") self.main_button.clicked.connect(lambda a=None: self.protocol.hmmer_results_state()) self.main_vbox.addWidget(self.main_button) self.main_button.setFixedWidth(self.main_button.sizeHint().width()) # Sets the main vertical layout. self.central_widget.setLayout(self.main_vbox) self.main_vbox.setAlignment(self.main_button, QtCore.Qt.AlignCenter) def display_hmmscan_hits(self): """ This is used to display in the HMMSCAN results window information for each hit and a checkbutton to select it for importing it inside PyMod. """ #------------------------------------ # Shows the headers of the columns. 
- #------------------------------------ headers_font_style = "%s; color: %s" % (small_font_style, highlight_color) headers_font_style = "%s; font-weight: bold" % (small_font_style) self.hmmscan_seq_label = QtWidgets.QLabel("Name") self.hmmscan_seq_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.hmmscan_seq_label, 0, 0) self.description_label = QtWidgets.QLabel("Description") self.description_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.description_label, 0, 1) self.hmmscan_e_val_label = QtWidgets.QLabel("E-Value") self.hmmscan_e_val_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.hmmscan_e_val_label, 0, 2) self.query_span_label = QtWidgets.QLabel("Query span") self.query_span_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.query_span_label, 0, 3) self.hmm_span_label = QtWidgets.QLabel("HMM span") self.hmm_span_label.setStyleSheet(headers_font_style) self.results_grid.addWidget(self.hmm_span_label, 0, 4) #---------------------- # Prepare the colors. - #---------------------- self.color_palette_dec = domain_colors_ordered # self.color_palette = [[j*255 for j in i[1]] for i in self.color_palette_dec] self.color_palette_hex = [convert_rgb_to_hex(i[1]) for i in self.color_palette_dec] color_idx = 0 for hit in self.protocol.parsed_res: hit.update({'dom_color': self.color_palette_dec[color_idx], 'dom_color_hex':self.color_palette_hex[color_idx]}) color_idx += 1 if color_idx == len(self.color_palette_dec): color_idx = 0 #--------------------------------------- # Show the hits in the results window. - #--------------------------------------- # Keep track of the rows in the grid. hmmscan_output_row = 1 # This is going to contain the list of values of each checkbutton. self.color_square_lst = [] # all in a row self.domain_check_states = [] # clustered self.domain_widgets_dict = {} domain_counter = 0 # Process all the hits. 
Eeach hit is a different HMM profile, that is, a # different domain type. for hit in self.protocol.parsed_res: # Hit name. hit_name_label = QtWidgets.QLabel(hit['id']) hit_name_label.setStyleSheet(small_font_style) self.results_grid.addWidget(hit_name_label, hmmscan_output_row, 0) # Hit description. if 'desc' in hit: descr_text = hit['desc'][:40] + '...' if len(hit['desc']) > 41 else hit['desc'] else: descr_text = '-' hit_descr_label = QtWidgets.QLabel(descr_text) hit_descr_label.setStyleSheet(small_font_style) self.results_grid.addWidget(hit_descr_label, hmmscan_output_row, 1) # E-value info. evalue_label = QtWidgets.QLabel(str(hit['evalue'])) evalue_label.setStyleSheet("%s; color: %s" % (small_font_style, hit['dom_color_hex'])) # results_row_options self.results_grid.addWidget(evalue_label, hmmscan_output_row, 2) hmmscan_output_row += 1 self.domain_check_states.append([]) for domain_hit in hit['location']: domain_widgets = {"checkbox": None, "rect": None} # Domain rectangle shown in the upper frame of the window. domain_rect = self.add_domain_representation(hit, domain_hit) domain_widgets["rect"] = domain_rect # Checkbox for selection. color_square = QtWidgets.QCheckBox(" ") self.color_square_lst.append(color_square) self.domain_check_states[-1].append(color_square) color_square.clicked.connect(lambda a=None, x=domain_counter: self.toggle_domain(x)) self.results_grid.addWidget(color_square, hmmscan_output_row, 0) domain_widgets["checkbox"] = color_square # Grahical representation of the domain in the query sequence. graphics_view = self.create_little_canvas(hit, domain_hit) self.results_grid.addWidget(graphics_view, hmmscan_output_row, 1) # Individual E-value info. hsp_ievalue_label = QtWidgets.QLabel(str(domain_hit['evalue'])) hsp_ievalue_label.setStyleSheet(small_font_style) self.results_grid.addWidget(hsp_ievalue_label, hmmscan_output_row, 2) # Query span info. 
span_info_label = QtWidgets.QLabel(str(domain_hit['start']) + ' - ' + str(domain_hit['end'])) span_info_label.setStyleSheet(small_font_style) self.results_grid.addWidget(span_info_label, hmmscan_output_row, 3) # HMM span info. hspan_info_text = str(domain_hit['hmm_start']) + ' - ' + str(domain_hit['hmm_end']) hspan_info_label = QtWidgets.QLabel(hspan_info_text) hspan_info_label.setStyleSheet(small_font_style) self.results_grid.addWidget(hspan_info_label, hmmscan_output_row, 4) hmmscan_output_row += 1 self.domain_widgets_dict[domain_counter] = domain_widgets domain_counter += 1 def add_domain_representation(self, hit, domain_hit): queryspan_start = int(domain_hit['start']) queryspan_end = int(domain_hit['end']) domain_x = self.x_init + int(queryspan_start/float(self.query_len)*self.full_seq_rect_w) domain_y = self.domain_y_init domain_w = int((queryspan_end-queryspan_start)/float(self.query_len)*self.full_seq_rect_w) domain_h = 25 domain_pen = QtGui.QPen(QtGui.QColor(0, 0, 0, 255), 1) qcolor = QtGui.QColor(0, 0, 0) qcolor.setNamedColor(hit['dom_color_hex']) domain_brush = QtGui.QBrush(qcolor) domain_rect = self.canvas_plot_scene.addRect(domain_x, domain_y, domain_w, domain_h, domain_pen, domain_brush) domain_rect.setVisible(False) return domain_rect def create_little_canvas(self, hit, domain_hit, default_width=300, default_height=11): # Builds the graphics view and scene. 
canvas_plot_scene = QtWidgets.QGraphicsScene() canvas_plot_view = QtWidgets.QGraphicsView(canvas_plot_scene) canvas_plot_view.setFixedHeight(default_height) canvas_plot_view.setFixedWidth(default_width) canvas_plot_view.setSizePolicy(self.preferred_size_policy) canvas_plot_view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) canvas_plot_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) canvas_plot_view.setFrameShape(QtWidgets.QFrame.NoFrame) canvas_plot_view.setStyleSheet("border: 0px; background: %s" % self.view_bg_color) # Get the coordinates of the various graphics elements. one_res_span = default_width/float(self.query_len) # proporzione tra la lunghezza della seq e lo spazio grafico queryspan_start = int(domain_hit['start']) queryspan_end = int(domain_hit['end']) queryspan_start_graphic = int(queryspan_start*one_res_span) queryspan_end_graphic = int(queryspan_end*one_res_span) # canvas_true_width = int(queryspan_end_graphic-queryspan_start_graphic) # space_at_end = int(int(default_width)-(canvas_true_width+queryspan_start_graphic)) # Draws a gray rectangle representing the full protein sequence. canvas_plot_scene.addRect(0, 3, default_width, 5, self.full_seq_pen, self.full_seq_brush) # Draws a colored rectangle representing the domain. line_pen = QtGui.QPen(QtGui.QColor(0, 0, 0, 255), 1) qcolor = QtGui.QColor(0, 0, 0) qcolor.setNamedColor(hit['dom_color_hex']) brush = QtGui.QBrush(qcolor) canvas_plot_scene.addRect(queryspan_start_graphic, 0, queryspan_end_graphic-queryspan_start_graphic, default_height, line_pen, brush) return canvas_plot_view def toggle_domain(self, domain_idx): domain_widgets = self.domain_widgets_dict[domain_idx] if domain_widgets["rect"].isVisible(): domain_widgets["rect"].setVisible(False) else: domain_widgets["rect"].setVisible(True)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/domain_analysis_protocols/hmmscan/__init__.py
.py
20,738
489
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. """ Module implementing hmmscan (from the hmmer package) searches in PyMod. This is used to scan profile-HMM libraries, such as the PFAM library. """ import os import json import urllib.request, urllib.error, urllib.parse from Bio import SearchIO from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol from pymod_lib.pymod_protocols.domain_analysis_protocols.hmmscan._gui import Hmmscan_options_window_qt, Hmmscan_results_window_qt from pymod_lib.pymod_element_feature import Domain_feature from pymod_lib.pymod_os_specific import get_exe_file_name, check_network_connection from pymod_lib.pymod_threading import Protocol_exec_dialog from pymod_lib.pymod_exceptions import catch_protocol_exception class Hmmscan_protocol(PyMod_protocol): save_domain_files = False protocol_name = "Domain Search" def __init__(self, pymod, father_protocol, output_directory=os.path.curdir): PyMod_protocol.__init__(self, pymod, pymod.domain_analysis_dirpath) self.father_protocol = father_protocol self.query_element = self.father_protocol.pymod_element def launch_from_gui(self): if not self.check_hmmscan_program(): return None title = "HMMSCAN Options" if self.father_protocol.domain_search_mode == "remote": title = "EBI " + title self.hmmer_options_window = Hmmscan_options_window_qt(parent=self.pymod.main_window, protocol=self, submit_command=self.domain_search_state, title=title, upper_frame_title="Here you can modify the options for HMMSCAN",) self.hmmer_options_window.show() def check_hmmscan_program(self): if self.father_protocol.domain_search_mode == "local": exe_dirpath = self.pymod.hmmer_tool["exe_dir_path"].get_value() if not (exe_dirpath and os.path.isdir(exe_dirpath)): title = "Hmmer Suite 
Executable Error" message = ("The default Hmmer suite executables directory is missing. Please set one" " in the 'Tools -> Options' menu.") self.pymod.main_window.show_error_message(title, message) return False exe_filename = get_exe_file_name("hmmscan") exe_filepath = os.path.join(exe_dirpath, exe_filename) if not os.path.isfile(exe_filepath): title = "Hmmer Suite Executable Error" message = ("A '%s' file is missing in the Hmmer executables directory. Please specify" " in the 'Tools -> Options' menu a Hmmer executables directory where a" " '%s' file is found." % (exe_filename, exe_filename)) self.pymod.main_window.show_error_message(title, message) return False db_dirpath = self.pymod.hmmer_tool["hmmscan_db_dir_path"].get_value() if db_dirpath.replace(" ", "") == "": title = "No Database Directory" message = ("No databases directory is defined for HMMSCAN. Please define a database" " directory in the PyMod options window in order to perform a HMMSCAN search.") self.pymod.main_window.show_error_message(title, message) return False if not os.path.isdir(db_dirpath): title = "Database Directory not Found" message = ("The specified database directory does not exists. Please specify an" " existing one in the Tools -> Options menu.") self.pymod.main_window.show_error_message(title, message) return False self.hmmscan_db_list = [d for d in sorted(os.listdir(db_dirpath)) if d.endswith("hmm.h3m")] if len(self.hmmscan_db_list) == 0: title = "Hmmscan Database Error" message = ("No valid databases files were found in the hmmscan database directory." 
" Please refer to the PyMod manual to know how to install them.") self.pymod.main_window.show_error_message(title, message) return False return True elif self.father_protocol.domain_search_mode == "remote": if not check_network_connection("https://google.com", timeout=3): title = "Connection Error" message = ("An internet connection is not available, can not connect to the EBI" " server to run HMMSCAN.") self.pymod.main_window.show_error_message(title, message) return False return True def get_options_from_gui(self): # Gets the database on which to perform the search. self.hmmscan_db = self.hmmer_options_window.hmmer_database_rds.getvalue() if self.hmmscan_db == None: raise ValueError("Please select a HMM database perform a domain search.") # Gets the evalue cutoff. self.evalue_cutoff = self.hmmer_options_window.e_value_threshold_enf.getvalue(validate=True) @catch_protocol_exception def domain_search_state(self): """ Launched when pressing the 'SUBMIT' button in the hmmscan option window. """ #---------------------------- # Get options from the GUI. - #---------------------------- try: self.get_options_from_gui() except Exception as e: self.pymod.main_window.show_error_message("Input Error", str(e)) return None self.hmmer_options_window.destroy() #---------------------------------------- # Actually runs the searching protocol. - #---------------------------------------- # Remote. if self.father_protocol.domain_search_mode == 'remote': self.search_protocol = Hmmscan_web_parsing_protocol(self.pymod, self.father_protocol) # Local. 
elif self.father_protocol.domain_search_mode == 'local': self.search_protocol = Hmmscan_local_parsing_protocol(self.pymod, self.father_protocol) self.hmmscan_db = os.path.join(self.pymod.hmmer_tool["hmmscan_db_dir_path"].get_value(), self.hmmscan_db_dict[self.hmmscan_db]) if not self.pymod.use_protocol_threads: self.run_search_protocol_scan() else: if self.father_protocol.domain_search_mode == 'remote': title = "Running EBI HMMSCAN" label_text = "Connecting to the HMMSCAN EBI server. Please wait for the process to complete..." elif self.father_protocol.domain_search_mode == 'local': title = "Running HMMSCAN" label_text = "Running HMMSCAN. Please wait for the process to complete..." p_dialog = Protocol_exec_dialog(app=self.pymod.main_window, pymod=self.pymod, function=self.run_search_protocol_scan, args=(), wait_start=1, title=title, label_text=label_text) p_dialog.exec_() #---------------------------------------------------------- # Parses the results and show them in the results window. - #---------------------------------------------------------- self.parsed_res = self.run_search_protocol_parse() if not self.parsed_res: self.pymod.main_window.show_warning_message("Search completed", "No match found with enabled filters.") return None self.results_window = Hmmscan_results_window_qt(parent=self.pymod.main_window, protocol=self) self.results_window.show() def run_search_protocol_scan(self): self.domain_search_results = self.search_protocol.search_domains(self.query_element, evaluecutoff=self.evalue_cutoff, database=self.hmmscan_db) def run_search_protocol_parse(self): parser_generator = self.search_protocol.parse(self.domain_search_results) array = [] for i in parser_generator: array.append(i.copy()) sorted_array = sorted(array, key=lambda x: x['evalue']) return sorted_array #-------------------------------------------------------------------------- # Import results in PyMod. 
- #-------------------------------------------------------------------------- def hmmer_results_state(self): """ Called when the 'SUBMIT' button is pressed """ # For each hsp takes the state of its tkinter checkbutton. my_domains_map = [[int(v.isChecked()) for v in hit_state] for hit_state in self.results_window.domain_check_states] selection_map = [int(v.isChecked()) for v in self.results_window.color_square_lst] # If the user selected at least one HSP. if 1 in selection_map: for hit_ix in range(len(my_domains_map)): # hsp_lst = self.results_window.pfam_data[hit_ix]['location'] hsp_lst = self.parsed_res[hit_ix]['location'] for loc_ix in range(len(hsp_lst)): # coupling the HSP in self.pfam_data with the status of the checkbutton hsp_lst[loc_ix].update({'selected': my_domains_map[hit_ix][loc_ix]}) self.import_results_in_pymod() self.results_window.close() def import_results_in_pymod(self, color_sequence=True, color_structure=True): """ Actually imports the domains in Pymod and assignes information about the selected domain to the query PyMod elements and its residues. """ domain_count = 0 for d_index in range(len(self.parsed_res)): d_item = self.parsed_res[d_index] d_hsp_lst = self.parsed_res[d_index]['location'] for d in d_hsp_lst: if d['selected']: startindex = int(d['start'])-1 # lo start originale conta da 1 endindex = int(d['end']) # qui va bene, perche' l'ultimo indice e' esclusivo new_domain = Domain_feature(id=d['hsp_number_id'], # name=d['hsp_number_id'], name=d['hsp_res_id'], start=startindex, end=endindex, evalue=d['evalue'], color=d_item['dom_color'], #tupla description=d_item['desc']) if self.save_domain_files: o_fh = open(os.path.join(self.output_directory, "domain_%s.txt" % (domain_count+1)), "w") o_fh.write(json.dumps(new_domain.get_feature_dict())) o_fh.close() self.query_element.add_domain_feature(new_domain) domain_count += 1 # Sorts the domain features list of the PyMod element. 
self.query_element.features["domains"].sort(key=lambda x: x.start) if color_sequence: self.pymod.main_window.color_selection("single", self.query_element, "domains") # Completes the process and stores the results in PyMod. self.father_protocol.evaluate_domain_search() #------------------------------------------------------- # Methods for storing the results of a hmmscan search. - #------------------------------------------------------- def _initializes_domains_search(self, query_element, **args): self.query_element_name = self.father_protocol.query_element_name self.query_element_seq = query_element.my_sequence.replace('-', '') self.query_filepath = self.father_protocol._pymod_element_seq_filepath def get_domain_name(self, parsed_output_item, locitem): return parsed_output_item['id'] ################################################################################################### # Remote hmmscan searches. # ################################################################################################### class Hmmscan_web_parsing_protocol(Hmmscan_protocol): hmm_url = 'https://www.ebi.ac.uk/Tools/hmmer/search/hmmscan' connection_error_title = "Connection Error" connection_error_message = "Can not connect to the EBI hmmscan server. Please check your Internet access." def search_domains(self, query, evaluecutoff, database='pfam'): self._initializes_domains_search(query) self.evaluecutoff = float(evaluecutoff) self.database = database # install a custom handler to prevent following of redirects automatically. 
class SmartRedirectHandler(urllib.request.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): return headers opener = urllib.request.build_opener(SmartRedirectHandler()) urllib.request.install_opener(opener) parameters = {'hmmdb': database.lower(), 'seq': self.query_element_seq, } enc_params = urllib.parse.urlencode(parameters).encode('utf-8') # post the search request to the server request = urllib.request.Request(self.hmm_url, enc_params) # Get the url where the search results can be fetched from. try: results_url = urllib.request.urlopen(request).get('location') except urllib.error.URLError as e: raise e # modify the range, format and presence of alignments in your results here res_params = {'output': 'json'} # add the parameters to your request for the results enc_res_params = urllib.parse.urlencode(res_params) modified_res_url = results_url + '?' + enc_res_params # send a GET request to the server results_request = urllib.request.Request(modified_res_url) data = urllib.request.urlopen(results_request) # Saves the results. response_content = data.read().decode('utf-8') results_file_name = self.query_element_name + '_web_' + database + '_output.' + res_params['output'] results_file = os.path.join(self.output_directory, results_file_name) with open(results_file, 'w') as o_fh: o_fh.write(response_content) return results_file def parse(self, results_filepath): with open(results_filepath, "r") as r_fh: json_results = json.loads(r_fh.read()) # Gets the list of "hits" (that is, the list of domains types identified in the query). 
matchlist = json_results["results"]["hits"] parsed_output_item = {} for match_idx, match in enumerate(matchlist): parsed_output_item.update(_readable_attrs(match)) unique_id = parsed_output_item['id'] + '*' + str(match_idx).zfill(4) parsed_output_item.update({'unique_id': unique_id}) loclist = match["domains"] locationattrs = [] locitem = {} # Each "hit" may have different domains, for example, when a protein has multiple domains # of the same type (repeated domains). for loc_idx, loc in enumerate(loclist): locitem.update(_readable_attrs(loc)) loc_id = parsed_output_item['id'] + '_hsp_' + str(loc_idx).zfill(3) loc_res = self.get_domain_name(parsed_output_item, locitem) locitem.update({'hsp_number_id':loc_id, 'hsp_res_id':loc_res}) try: if self.database.lower() != 'gene3d': if float(locitem['evalue']) < self.evaluecutoff: locationattrs.append(locitem.copy()) # else: locationattrs.append(locitem.copy()) except AttributeError: locationattrs.append(locitem.copy()) if locationattrs: parsed_output_item.update({'location': locationattrs}) # yield parsed_output_item else: parsed_output_item = {} continue def _readable_attrs(node): """ Changes the keys of the json HMMSCAN ouptut to Biopython ones. """ conversion_standard = {'name': 'id', 'aliL': 'length', 'ievalue': 'evalue', 'ienv': 'env_start', 'jenv': 'env_end', 'iali': 'ali_start', 'jali': 'ali_end', 'alisqfrom': 'start', 'alisqto': 'end', 'alihmmfrom': 'hmm_start', 'alihmmto': 'hmm_end',} new_node = {} for k in node: if k in conversion_standard: new_node[conversion_standard[k]] = node[k] else: new_node[k] = node[k] return new_node ################################################################################################### # Local hmmscan searches. 
# ################################################################################################### class Hmmscan_local_parsing_protocol(Hmmscan_protocol): def search_domains(self, query_element, database, evaluecutoff): self._initializes_domains_search(query_element) self.evaluecutoff = evaluecutoff exe_filepath = os.path.join(self.pymod.hmmer_tool["exe_dir_path"].get_value(), get_exe_file_name("hmmscan")) out_filepath = os.path.join(self.output_directory, "hmmscan_out_" + self.query_element_name + ".txt") cline = [exe_filepath, "-o", out_filepath, "-E", str(evaluecutoff), database.replace('.h3m', ''), self.query_filepath] # self.pymod.new_execute_subprocess(cline) self.pymod.execute_subprocess(cline, new_shell=False) return out_filepath def parse(self, file): # parsed_output = {} parsed_output_item = {} inputfile = open(file, 'r') for qr in SearchIO.parse(inputfile, 'hmmer3-text'): for hit in qr.hits: parsed_output_item.update({'id': hit.id, 'evalue': hit.evalue, 'length': qr.seq_len, 'query_descr': qr.description, 'desc': hit.description}) unique_id = parsed_output_item['id'] + '*' + str(qr.hits.index(hit)).zfill(4) parsed_output_item.update({'unique_id': unique_id}) hhits = hit.hsps locattrs = [] locitem = {} for h in hhits: # corresponding_hit = qr[h.hit_id] locitem.update({'id': h.hit_id, 'bitscore': h.bitscore, 'evalue': h.evalue, 'evalue_cond': h.evalue_cond, 'env_start': h.env_start, 'env_end': h.env_end, 'start': int(h.query_start)+1, 'end': h.query_end, 'hmm_start': h.hit_start, 'hmm_end': h.hit_end, }) loc_id = parsed_output_item['id'] + '_hsp_' + str(hit.hsps.index(h)).zfill(3) loc_res = self.get_domain_name(parsed_output_item, locitem) locitem.update({'hsp_number_id': loc_id, 'hsp_res_id': loc_res}) if locitem['evalue'] < self.evaluecutoff: locattrs.append(locitem.copy()) parsed_output_item.update({'location': locattrs}) yield parsed_output_item inputfile.close() # return parsed_output
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/scr_find.py
.py
15,878
343
# Copyright 2020 by Dario Marzella, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import math from pymod_lib import pymod_vars from ._evolutionary_analysis_base import Evolutionary_analysis_protocol from pymod_lib.pymod_exceptions import PyModMissingStructure from pymol.Qt import QtWidgets from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt, PyMod_entryfield_qt, PyMod_radioselect_qt, PyMod_scalebar_qt) from pymol import cmd ################################################################################################### # SCR_FIND protocol. # ################################################################################################### class SCR_FIND_analysis(Evolutionary_analysis_protocol): def additional_initialization(self): self.verbose = False self.matrices_initialized = False self.hide_non_scrs = True def launch_from_gui(self): # Checks the elements of the alignment. child_elements = [e for e in self.input_cluster_element.get_children() if e.has_structure()] if len(child_elements) < 2: self.pymod.main_window.show_error_message("Selection Error", ("A SCR_FIND analysis can only be performed on alignments in which" " at least two elements have a 3D structure loaded in PyMOL.")) return None if any([e.polymer_type == "nucleic_acid" for e in child_elements]): self.pymod.main_window.show_error_message("Selection Error", "Can not perform the analysis for nucleic acids structures.") return None if not self.input_cluster_element.algorithm in pymod_vars.structural_alignment_tools: message = pymod_vars.structural_alignment_warning % "SCR_FIND" self.pymod.main_window.show_warning_message("Alignment Warning", message) # Shows the option window. 
self.build_scr_find_window() def build_scr_find_window(self): """ Builds a window with options for the SCR_FIND algorithm. """ self.scr_find_window = SCR_FIND_window_qt(self.pymod.main_window, protocol=self, title="SCR_FIND algorithm options", upper_frame_title="Here you can modify options for SCR_FIND", submit_command=self.scr_window_submit) self.scr_find_window.show() def scr_window_submit(self, value = None): if self.verbose: print("- Computing centroids. This will take a bit, but just on first SCR_FIND Submit") self.hide_non_scrs = pymod_vars.yesno_dict[self.scr_find_window.scr_find_hide_non_scrs.getvalue()] try: self.GP = self.scr_find_window.scr_find_gap_penalty_enf.getvalue(validate=True) self.score_limit = float(self.scr_find_window.sc_scale.get()) self.window_size = int(self.scr_find_window.sw_scale.get()) except Exception as e: self.pymod.main_window.show_error_message("Input Error", str(e)) return None self.scr_find_state() def scr_find_state(self): """ Called when the "SUBMIT" button is pressed on the SCR_FIND window. Contains the code to compute SCR_FIND scores using the 'SCR_FIND' class. """ if not self.matrices_initialized: if len(set([len(e.my_sequence) for e in self.input_cluster_element.get_children()])) > 1: raise Exception("The aligned sequences don't have the same length.") self.selected_elements = [e for e in self.input_cluster_element.get_children() if e.has_structure()] ################################### #Generating a list containing ordered residues coordinates ################################### self.matrix = [] self.alignment_length = len(self.selected_elements[0].my_sequence) # First get all the coordinates of the alignment elements. 
all_coords_dict = {} for pymod_element in self.selected_elements: residues, coords = self.get_coords_array(pymod_element=pymod_element, interaction_center="ca", get_selectors=False) all_coords_dict[pymod_element] = dict([(r.db_index, c) for (r, c) in zip(residues, coords)]) # Then build the matrix used to compute SCR scores. for ali_id in range(0, self.alignment_length): matrix_row = [] for pymod_element in self.selected_elements: pos = pymod_element.my_sequence[ali_id] if not pos in ("-", "X"): try: residue = pymod_element.get_residue_by_index(ali_id, aligned_sequence_index=True) coords = list(all_coords_dict[pymod_element][residue.db_index]) matrix_row.append((coords, residue)) except Exception as e: matrix_row.append((["bugged_residue"], None)) message = ("- WARNING: Some problems occurred with" " structure %s, aminoacid %s, alignment" " position %s: %s" % (pymod_element.my_header, pos, ali_id, str(e))) print(message) else: matrix_row.append((['-', '-', '-'], None)) self.matrix.append(matrix_row) ################################### #Generating a list containing ordered centroids coordinates ################################### self.centroid_list = [] for i in range(len(self.matrix)): x_list=[] y_list=[] z_list=[] for s in range(len(self.matrix[i])): if "bugged_residue" not in self.matrix[i][s][0]: if self.matrix[i][s][0][0] != '-': x_list.append(self.matrix[i][s][0][0]) if self.matrix[i][s][0][1] != '-': y_list.append(self.matrix[i][s][0][1]) if self.matrix[i][s][0][2] != '-': z_list.append(self.matrix[i][s][0][2]) if x_list == []: datax= '-' datay= '-' dataz= '-' else: datax= (sum(x_list))/(len(x_list)) datay= (sum(y_list))/(len(y_list)) dataz= (sum(z_list))/(len(z_list)) self.centroid_list.append([datax, datay, dataz]) self.matrices_initialized = True ################################### #Generating a SC score list ################################### # i position id, s structure, c coordinate (0 = x, 1 = y, 2 = z), N number of residues in current SCR 
self.score_list = [] for i in range(len(self.matrix)): dc_list = [] N=0 gaps=0 for s in range(len(self.matrix[i])): if '-' not in self.matrix[i][s][0] and "bugged_residue" not in self.matrix[i][s][0]: for c in range(0,3): dc = (self.matrix[i][s][0][c] - self.centroid_list[i][c])**2 dc_list.append(dc) N+=1 elif "-" in self.matrix[i][s][0]: gaps+=1 elif "bugged_residue" in self.matrix[i][s][0]: pass if N == 0: SC = 1000 + (gaps*(float(self.GP))) else: SC = ((math.sqrt(sum(dc_list)/(N)))+(gaps*(float(self.GP)))) for pos_in_str in self.matrix[i]: if pos_in_str[1] != None: pos_in_str[1].scr_score = {"score": SC, "interval": None} self.score_list.append(SC) ################################ #Finding SCRs with a sliding widow of lenght choosen by the user ################################ #s defines the starting position of the sliding window, e defines its end. self.SCR_list=[] s = 0 stn_dev = None while s in range((len(self.score_list))-self.window_size): e = s + self.window_size if e > (len(self.score_list)): break else: mean = (sum(self.score_list[s:e]))/(e-s) if mean <= self.score_limit: while mean <= self.score_limit and e <= (len(self.score_list)) and (stn_dev == None or stn_dev <= 4): e+=1 mean = (sum(self.score_list[s:e]))/(e-s) devs = [] for score in self.score_list[s:e]: dev = (score - mean)**2 devs.append(dev) stn_dev = math.sqrt((sum(devs)/((e-s)-1))) start = s+1 end = e-1 SCR = [start, end] self.SCR_list.append(SCR) s = e stn_dev = None else: s+=1 if self.SCR_list == []: print('- WARNING: No SCRs found! 
try to change your parameters!') else: for element in self.selected_elements: for residue in element.get_polymer_residues(): residue.is_scr = False ali_id = residue.get_id_in_aligned_sequence() for SCR in self.SCR_list: if SCR[0] <= ali_id+1 <= SCR[-1]: residue.is_scr = True if self.verbose: print('- SCRs list:', self.SCR_list) #defines 10 color intervals, between the lowest and the highest value for SC score in any SCR min_list = [] max_list = [] for SCR in self.SCR_list: filtered_SCR = [ i for i in (self.score_list[SCR[0]:SCR[-1]])] # if i < self.GP #da 3 in su deviazioni standard partial_min = min(filtered_SCR) partial_max = max(filtered_SCR) min_list.append(partial_min) max_list.append(partial_max) if min_list == []: self.pymod.main_window.show_error_message("No SCR found", ("Your SC score limit is below the lowest SC score in the structure." " Please increase SC score limit or decrease minimum sliding window length")) return None else: glob_min = min(min_list) glob_max = max(max_list) click = (glob_max-glob_min)/10 intervals = [] for i in range(10): intervals.append(((glob_min+(i*click)), (glob_min+((i+1)*click)))) #Assigns to each residue within ad SCR his color interval (scr_color_interval) for element in self.selected_elements: show_residue_list = [] cmd.show("cartoon", element.get_pymol_selector()) if self.hide_non_scrs: cmd.hide("everything", element.get_pymol_selector()) for residue in element.get_polymer_residues(): if residue.is_scr and residue.scr_score and residue.scr_score['score'] is not None: color = False i = 1 for interval in intervals: if min(interval) <= residue.scr_score["score"] < max(interval): residue.scr_score["interval"] = i color = True break if residue.scr_score["score"] >= max(interval) and i == 10: residue.scr_score["interval"] = 10 color = True break i += 1 if not color: residue.scr_score["interval"] = 10 show_residue_list.append(str(residue.db_index)) else: residue.scr_score = {"score":None, "interval":None} cmd.show("cartoon", "%s 
and resi %s" % (element.get_pymol_selector(), self.pymod.main_window._join_residues_list(show_residue_list))) for element in self.selected_elements: self.pymod.main_window.color_element_by_scr_scores(element) ################################################################################################### # GUI. # ################################################################################################### class SCR_FIND_window_qt(PyMod_protocol_window_qt): def add_middle_frame_widgets(self): # SC-score scalebar. self.sc_scale = PyMod_scalebar_qt(label_text="SC-score Limit", slider_value=3.0, slider_from=0.5, slider_to=5.0, slider_resoution=0.25, slider_tickinterval=1.0, slider_use_float=True, slider_binding=self.protocol.scr_window_submit, slider_width=375) self.middle_formlayout.add_widget_to_align(self.sc_scale) # Sliding Window size scalebar. self.sw_scale = PyMod_scalebar_qt(label_text="Sliding Window Min. Size", slider_value=3, slider_from=2, slider_to=50, slider_resoution=1, slider_tickinterval=5, slider_binding=self.protocol.scr_window_submit, slider_width=375) self.middle_formlayout.add_widget_to_align(self.sw_scale) # Gap penalty entry field. self.scr_find_gap_penalty_enf = PyMod_entryfield_qt(label_text="Gap Penalty", value='100', validate={'validator': 'integer', 'min': 0, 'max': 1000}, enter_command=self.protocol.scr_window_submit) self.middle_formlayout.add_widget_to_align(self.scr_find_gap_penalty_enf) # Hide non-SCR residues or show them white. self.scr_find_hide_non_scrs = PyMod_radioselect_qt(label_text="Hide non SCRs", buttons=('Yes', 'No')) self.scr_find_hide_non_scrs.setvalue("No") self.scr_find_hide_non_scrs.buttons_dict["No"].clicked.connect(self.protocol.scr_window_submit) self.scr_find_hide_non_scrs.buttons_dict["Yes"].clicked.connect(self.protocol.scr_window_submit) self.middle_formlayout.add_widget_to_align(self.scr_find_hide_non_scrs) self.middle_formlayout.set_input_widgets_width(110)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/weblogo.py
.py
11,424
251
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import pymod_lib.pymod_os_specific as pmos from pymol.Qt import QtWidgets from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt, PyMod_entryfield_qt, PyMod_radioselect_qt, PyMod_combobox_qt, PyMod_hbox_option_qt) from ._evolutionary_analysis_base import Evolutionary_analysis_protocol from ._web_services_common import Web_services_common class WebLogo_analysis(Evolutionary_analysis_protocol, Web_services_common): """ Class implementing methods for accessing the WebLogo web service. """ #Units list. units_list = ['Bits', 'Probability'] #Color scheme list. colorscheme_list = ['Auto', '(AA) Charge', '(AA) Chemistry', '(AA default) Hydrophobicity', '(NA) Classic', '(NA default) Base pairing'] #Logo Format format_list = ['PDF', 'PNG image'] def additional_initialization(self): self.ali_length = len(self.input_cluster_element.my_sequence) def launch_from_gui(self): self.build_logo_options_window() def build_logo_options_window(self): """ Displayes a window with a series of widgets through which users can define WebLogo parameters. """ self.logo_window = WebLogo_options_window_qt(self.pymod.main_window, protocol=self, title="WebLogo 3 web-application Options", upper_frame_title="Here you can modify options for WebLogo 3", submit_command=self.logo_state) self.logo_window.show() def check_logo_correct_parameters(self): ''' Checks if the values that were insert in the LOGO window are correct. ''' correct_input = True # This variable defines the status. try: # Checks if entries are integer numbers. start = int(self.logo_window.logo_start.value()) end = int(self.logo_window.logo_end.value()) # Get advanced options. 
if self.logo_window.showing_advanced_widgets: val = int(self.logo_window.logo_stacks_enf.getvalue()) # Check on the logic of choosing extremities. if start >= end: correct_input = False errortitle = "Input Error" errormessage = "Start value cannot be greater than the end value. Please correct." self.pymod.main_window.show_error_message(errortitle, errormessage) elif start > self.ali_length or end > self.ali_length or start < 0 or end < 0: correct_input = False errortitle = "Input Error" errormessage = "Values cannot be greater than the sequence length and both must be greater then 0. Please correct." self.pymod.main_window.show_error_message(errortitle, errormessage) except Exception as e: correct_input=False errortitle = "Input Error" errormessage = "Non valid numeric input. Please correct." self.pymod.main_window.show_error_message(errortitle, errormessage) return correct_input def logo_state(self): """ This method is called when the 'Submit' button on the LOGO window is pressed. It runs a check on the entries, if they are correct it calls the getLogo() function """ if not self.check_logo_correct_parameters(): return False self.getLogo() def getLogo(self): ''' Generates a LOGO of the alignment, by using WebLogo 3 site. Requires active Internet connection. 
''' self.verbose = True #Units dictionary UNITS = {'Bits':'bits', 'Probability':'probability'} #Color scheme dictionary COLOR_SCHEME = { 'Auto':'color_auto', '(NA default) Base pairing':'color_base_pairing', '(NA) Classic':'color_classic', '(AA default) Hydrophobicity':'color_hydrophobicity', '(AA) Chemistry':'color_chemistry', '(AA) Charge':'color_charge' } #Format dictionary FORMATS = {'PNG image' : 'png_print', 'PDF' : 'pdf'} #switch format-extension extensions = {'png_print': 'png', 'pdf' : 'pdf'} logo_yesno = {"Yes": "true", "No": "false"} #Options defined in the window LOGO_UNIT = UNITS[self.logo_window.unit_combobox.get()] LOGO_COLOR = COLOR_SCHEME[self.logo_window.color_combobox.get()] LOGO_RANGE_START = self.logo_window.logo_start.value() LOGO_RANGE_END = self.logo_window.logo_end.value() #Options defined in advanced options sub-window, not always visible. Here they are initialised. LOGO_FORMAT = 'pdf' LOGO_TITLE = '' LOGO_STACKS_PER_LINE = '80' LOGO_SCALE_STACKS = 'false' LOGO_SHOW_ERRORBARS = 'false' if self.logo_window.showing_advanced_widgets: LOGO_FORMAT = FORMATS[self.logo_window.format_combobox.get()] LOGO_TITLE = self.logo_window.logo_title_enf.getvalue() LOGO_STACKS_PER_LINE = self.logo_window.logo_stacks_enf.getvalue() LOGO_SCALE_STACKS = logo_yesno[self.logo_window.scale_width_rds.getvalue()] LOGO_SHOW_ERRORBARS = logo_yesno[self.logo_window.show_error_rds.getvalue()] self.logo_window.destroy() if self.verbose: print('- Running GetLogo...') #weblogo3 URL weblogourl = 'http://weblogo.threeplusone.com/create.cgi' #Sets fields and arguments collecting values from the LOGO options window values = {'unit_name': LOGO_UNIT, 'color_scheme': LOGO_COLOR, 'logo_start': LOGO_RANGE_START, 'logo_end' : LOGO_RANGE_END, 'format': LOGO_FORMAT, 'logo_title': LOGO_TITLE, 'stacks_per_line': LOGO_STACKS_PER_LINE, 'show_xaxis': 'true', 'show_yaxis': 'true', 'show_ends': 'true', 'show_fineprint': 'true', } values_update_scale = {'scale_width': LOGO_SCALE_STACKS} 
values_update_errorbars = {'show_errorbars': LOGO_SHOW_ERRORBARS} if LOGO_SCALE_STACKS != 'false': values.update(values_update_scale) if LOGO_SHOW_ERRORBARS != 'false': values.update(values_update_errorbars) # Builds an url with the multiple alingment and WebLogo parameters and sends a request to # the WebLogo server. form_upload_file_name = "sequences" # 'sequences_file' upload_response = self.upload_alignment(self.input_cluster_element, weblogourl, form_upload_file_name, other_values=values, show_error=False) #Check if valid response is given if upload_response: #Writes output content in a file with extension given by LOGO_FORMAT logofile = os.path.join(self.pymod.images_dirpath, 'logo_' + str(self.pymod.logo_image_counter) + '.' + extensions[LOGO_FORMAT]) lf = open(logofile, 'wb') if self.verbose: print('- Creating file...') lf.write(upload_response) lf.close() self.pymod.logo_image_counter += 1 pmos.open_document_with_default_viewer(logofile) if self.verbose: print('- Done!') else: if self.verbose: print('- No response. Aborted.') class WebLogo_options_window_qt(PyMod_protocol_window_qt): def add_middle_frame_widgets(self): # Units combobox. self.unit_combobox = PyMod_combobox_qt(label_text="Unit Selection", items=self.protocol.units_list) self.unit_combobox.combobox.setCurrentIndex(0) self.middle_formlayout.add_widget_to_align(self.unit_combobox) # Color combobox. self.color_combobox = PyMod_combobox_qt(label_text="Color Scheme Selection", items=self.protocol.colorscheme_list) self.color_combobox.combobox.setCurrentIndex(5) self.middle_formlayout.add_widget_to_align(self.color_combobox) # Sub-frame created to display entries for Logo Range option. self.range_subframe = PyMod_hbox_option_qt(label_text="Logo Range") # Logo start position spinbox. self.logo_start = QtWidgets.QSpinBox() self.logo_start.setRange(1, self.protocol.ali_length) self.range_subframe.hbox.addWidget(self.logo_start) # Separator dash. 
self.logo_range_dash = QtWidgets.QLabel(" - ") self.range_subframe.hbox.addWidget(self.logo_range_dash) # Logo end position spinbox. self.logo_end = QtWidgets.QSpinBox() self.logo_end.setRange(1, self.protocol.ali_length) self.logo_end.setValue(self.protocol.ali_length) self.range_subframe.hbox.addWidget(self.logo_end) self.range_subframe.set_auto_input_widget_width() self.middle_formlayout.add_widget_to_align(self.range_subframe) # ADVANCED OPTIONS. self.show_advanced_button() # Logo format combobox. self.format_combobox = PyMod_combobox_qt(label_text='Logo Format', items=self.protocol.format_list) self.format_combobox.combobox.setCurrentIndex(0) self.middle_formlayout.add_widget_to_align(self.format_combobox, advanced_option=True) # LOGO title entry. self.logo_title_enf = PyMod_entryfield_qt(label_text="Logo Title", value='') self.middle_formlayout.add_widget_to_align(self.logo_title_enf, advanced_option=True) # Stacks per line entry. self.logo_stacks_enf = PyMod_entryfield_qt(label_text="Stacks per line", value='80', validate={'validator': 'integer', 'min': 0, 'max': 100}) self.middle_formlayout.add_widget_to_align(self.logo_stacks_enf, advanced_option=True) # Scale stacks width. self.scale_width_rds = PyMod_radioselect_qt(label_text="Scale stacks width", buttons=('Yes', 'No')) self.scale_width_rds.setvalue("No") self.middle_formlayout.add_widget_to_align(self.scale_width_rds, advanced_option=True) # Show error bars. self.show_error_rds = PyMod_radioselect_qt(label_text="Show error bars", buttons=('Yes', 'No')) self.show_error_rds.setvalue("No") self.middle_formlayout.add_widget_to_align(self.show_error_rds, advanced_option=True) self.middle_formlayout.set_input_widgets_width(175)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/_evolutionary_analysis_base.py
.py
607
15
# Copyright 2020 by Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. from pymod_lib.pymod_protocols.base_protocols import PyMod_protocol class Evolutionary_analysis_protocol(PyMod_protocol): protocol_name = "evolutionary_analysis" def __init__(self, pymod, input_cluster_element, *args): self.input_cluster_element = input_cluster_element PyMod_protocol.__init__(self, pymod, *args)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/pair_conservation.py
.py
6,164
163
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
Protocol to compute the conservation scores between a reference sequence in a
multiple sequence alignment and the rest of the sequences in that alignment.
"""

import os
import math

from pymod_lib.pymod_vars import dict_of_matrices
from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt,
                                                          PyMod_radioselect_qt,
                                                          PyMod_combobox_qt)
from pymod_lib.pymod_protocols.evolutionary_analysis_protocols._evolutionary_analysis_base import Evolutionary_analysis_protocol


class Pair_conservation_analysis(Evolutionary_analysis_protocol):
    """
    Class implementing a PyMod protocol to score the conservation between pairs
    of aligned residues of a multiple sequence alignment.
    """

    def launch_from_gui(self):
        self.build_pc_window()

    def build_pc_window(self):
        """
        Builds a window with options for the pair conservation scorer algorithm.
        """
        self.pc_scorer_window = Pair_conservation_window_qt(parent=self.pymod.main_window,
            protocol=self,
            title="Pair Conservation options",
            upper_frame_title="Here you can modify options for Pair Conservation analysis",
            submit_command=self.pc_scorer_state)
        self.pc_scorer_window.show()

    def pc_scorer_state(self):
        """
        Called when the "SUBMIT" button is pressed in the options window.
        Contains the code to compute the pairwise conservation scores and to
        assign them to the residues of each aligned sequence.
        """
        try:
            # Get the options from the GUI.
            self.reference_id = self.pc_scorer_window.get_reference_id()
            self.conservation_mode = "blosum62"

            # Gets the list of conservation scores. There are as many values as
            # positions in the alignment.
            all_pc_scores = self.pair_conservation_scorer()

            # Assigns conservation scores to each one of the aligned sequences.
            # 'rc' tracks the index of the next non-gap (polymer) residue.
            for seq_pc_scores, seq in zip(all_pc_scores, self.input_cluster_element.get_children()):
                residues = seq.get_polymer_residues()
                rc = 0
                for (r, v) in zip(seq.my_sequence, seq_pc_scores):
                    if r != "-":
                        residues[rc].pc_score = v
                        rc += 1
                seq._has_pc_scores = True
                self.pymod.main_window.color_element_by_pc_scores(seq)

        except Exception as e:
            message = "Could not compute Pair Conservation scores because of the following error: '%s'." % e
            self.pymod.main_window.show_error_message("Pair Conservation Error", message)

        # Closes the options window (no temporary files are written by this
        # protocol, so there is nothing else to clean up).
        self.pc_scorer_window.close()

    def pair_conservation_scorer(self):
        """
        Computes the pair conservation scores. Gets the reference sequence and,
        for each of its residues, checks if the corresponding residue in every
        other sequence is conserved (according to the selected criterion).

        Returns a list containing, for each aligned sequence, a list with one
        integer score per alignment column: 2 for identity with the reference,
        1 for a positive substitution-matrix score, 0 otherwise (including any
        gapped position).

        Raises KeyError for an unknown 'conservation_mode' and ValueError if
        the aligned sequences do not all have the same length.
        """
        # Prepare the scoring matrix.
        if self.conservation_mode == "blosum62":
            sub_matrix = dict_of_matrices["BLOSUM62"]
            # Completes the "other half" of the triangular matrix, so both
            # (a, b) and (b, a) can be looked up.
            for pair in list(sub_matrix.keys()):
                value = sub_matrix[pair]
                sub_matrix.update({(pair[1], pair[0]): value})
        else:
            # Fixed: the original message lacked a '%s' placeholder, which made
            # this statement fail with a TypeError instead of raising the
            # intended KeyError.
            raise KeyError("Unknown 'conservation_mode': %s" % self.conservation_mode)

        # Get the elements and check their sequences.
        aligned_elements = self.input_cluster_element.get_children()
        reference_element = aligned_elements[self.reference_id]
        if len(set([len(seq.my_sequence) for seq in aligned_elements])) != 1:
            raise ValueError("Not all sequences in the alignment have the same length.")

        # Actually assign the scores.
        ali_len = len(aligned_elements[0].my_sequence)
        all_pc_scores = []
        for aligned_seq in aligned_elements:
            seq_pc_scores = []
            for i in range(0, ali_len):
                ref_pos = reference_element.my_sequence[i]
                sel_pos = aligned_seq.my_sequence[i]
                if ref_pos != "-" and sel_pos != "-":
                    # Modify this to include other conservation scores.
                    if ref_pos == sel_pos:
                        seq_pc_scores.append(2)
                    else:
                        score = sub_matrix.get((ref_pos, sel_pos), 0)
                        if score > 0:
                            seq_pc_scores.append(1)
                        else:
                            seq_pc_scores.append(0)
                else:
                    seq_pc_scores.append(0)
            all_pc_scores.append(seq_pc_scores)

        return all_pc_scores


class Pair_conservation_window_qt(PyMod_protocol_window_qt):
    """
    Options window for MSA pair conservation calculations.
    """

    def add_middle_frame_widgets(self):
        # Type of conservation measure: currently only BLOSUM62 is supported,
        # so no selector is shown here. TODO: add one when more measures are
        # implemented.

        # Reference sequence.
        sequences_list = [element.my_header for element in self.protocol.input_cluster_element.get_children()]
        self.reference_combobox = PyMod_combobox_qt(label_text="Reference Sequence",
                                                    items=sequences_list)
        self.reference_combobox.combobox.setCurrentIndex(0)
        self.middle_formlayout.add_widget_to_align(self.reference_combobox)

        self.middle_formlayout.set_input_widgets_width(175)

    def get_reference_id(self):
        """
        Returns the index of the sequence selected as reference. Defaults to 0
        if the value can not be obtained from the combobox.
        """
        try:
            return self.reference_combobox.get_index()
        except Exception:
            print("- Warning: could not obtain the reference sequence id.")
            return 0
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/__init__.py
.py
0
0
null
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/entropy_scorer.py
.py
10,119
237
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

"""
Protocol to compute Shannon's entropy or relative entropy of a multiple
sequence alignment loaded in PyMod.
"""

import os
import math

import numpy as np
from Bio import AlignIO

from pymod_lib.pymod_vars import yesno_dict
from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_protocol_window_qt, PyMod_radioselect_qt
from pymod_lib.pymod_protocols.evolutionary_analysis_protocols._evolutionary_analysis_base import Evolutionary_analysis_protocol


# Amino acid frequencies found in the UniProt/SwissProt database (retrieved from:
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta.gz
# on 31/12/2019). Used to compute the relative entropy of a multiple sequence
# alignment.
swissprot_freq_dict = {'Q': 0.03932447341942903, 'W': 0.010987376799787335,
                       'E': 0.06731857081478208, 'R': 0.055339639013144266,
                       'T': 0.053554782580885245, 'Y': 0.029203273379595142,
                       'I': 0.05922496839490527, 'P': 0.047360775631143305,
                       'A': 0.08258683854220726, 'S': 0.06632123826472652,
                       'D': 0.054624431023512963, 'F': 0.03865735720637375,
                       'G': 0.07081179428789602, 'H': 0.02275979219540371,
                       'K': 0.05814545801909458, 'L': 0.09654911798449017,
                       'C': 0.013828227451923757, 'V': 0.06864307992926043,
                       'N': 0.04060773272101554, 'M': 0.024151072340423636}


class Entropy_analysis(Evolutionary_analysis_protocol):
    """
    Class implementing a PyMod protocol to score the entropy of a multiple
    sequence alignment.
    """

    # Maps the labels shown in the GUI to the internal entropy function names.
    entropy_func_labels = ["Shannon's Entropy", "Relative Entropy"]
    entropy_func_names = ["entropy", "relative_entropy"]
    entropy_func_dict = dict(zip(entropy_func_labels, entropy_func_names))

    # Standard amino acid one-letter codes considered in the calculations.
    amino_acids = tuple("QWERTYIPASDFGHKLCVNM")

    # Number of bins used when assigning entropy intervals for coloring.
    n_bins = 9

    def launch_from_gui(self):
        self.build_entropy_scorer_window()

    def build_entropy_scorer_window(self):
        """
        Builds a window with options for the entropy scorer algorithm.
        """
        self.entropy_scorer_window = Entropy_scorer_window_qt(parent=self.pymod.main_window,
            protocol=self,
            title="Entropy analysis options",
            upper_frame_title="Here you can modify options for Entropy analysis",
            submit_command=self.entropy_scorer_state)
        self.entropy_scorer_window.show()

    def entropy_scorer_state(self):
        """
        Called when the "SUBMIT" button is pressed on the Entropy analysis
        window. Contains the code to compute the entropy scores and assign
        them to the residues of the aligned sequences.
        """
        # Saves a .fasta file for the alignment.
        aligned_sequences = self.input_cluster_element.get_children()
        self.pymod.save_alignment_fasta_file("temp", aligned_sequences)
        input_file_shortcut = os.path.join(self.pymod.alignments_dirpath, "temp.fasta")

        try:
            # Get the options from the GUI.
            toss_gaps = yesno_dict[self.entropy_scorer_window.entropy_scorer_exclude_gaps_rds.getvalue()]
            toss_gap_threshold = 0.25
            entropy_func = self.entropy_func_dict[self.entropy_scorer_window.entropy_scorer_function_rds.getvalue()]

            # Gets the list of entropy scores. There are as many values as
            # positions in the alignment.
            entropy_scores = self.score_entropy(input_file_shortcut,
                                                toss_gaps=toss_gaps,
                                                toss_gap_threshold=toss_gap_threshold,
                                                entropy_func=entropy_func)

            # Filter the entropy scores.
            valid_entropy_scores = [i for i in entropy_scores if i is not None]
            if len(valid_entropy_scores) == 0:
                raise ValueError("All the colums have high levels of gaps (> %s)." % toss_gap_threshold)

            # Assigns bins to the entropy values.
            entropy_items = self.get_entropy_items_list(entropy_scores, entropy_func=entropy_func)

            # Assigns entropy scores to each one of the aligned sequences.
            # 'rc' tracks the index of the next non-gap (polymer) residue.
            for seq in aligned_sequences:
                residues = seq.get_polymer_residues()
                rc = 0
                for (r, v) in zip(seq.my_sequence, entropy_items):
                    if r != "-":
                        residues[rc].entropy_score = v
                        rc += 1
                seq._has_entropy_scores = True
                self.pymod.main_window.color_element_by_entropy_scores(seq)

        except Exception as e:
            message = "Could not compute Entropy scores because of the following error: '%s'." % e
            self.pymod.main_window.show_error_message("Entropy Analysis Error", message)

        # Removes the temporary alignment file.
        os.remove(input_file_shortcut)
        self.entropy_scorer_window.destroy()

    def score_entropy(self, alignment_filepath, toss_gaps=False, toss_gap_threshold=0.25,
                      entropy_func="entropy", only_aa=True):
        """
        Reads the alignment provided in the 'alignment_filepath' argument and
        computes the entropy of each column in the alignment. The alignment
        must be in the FASTA format. Returns a list with one score per column
        ('None' for columns tossed because of their gap content).
        """
        if entropy_func not in self.entropy_func_names:
            raise KeyError("Unknown 'entropy_func': %s" % entropy_func)

        # 'AlignIO.read' directly returns the single alignment of the file
        # (simpler than 'list(AlignIO.parse(...))[0]').
        alignment = AlignIO.read(alignment_filepath, "fasta")
        if len(set([len(seq.seq) for seq in alignment])) != 1:
            raise ValueError("Not all sequences in the alignment have the same length.")
        ali_len = len(alignment[0].seq)

        # Iterate through every column of the alignment.
        entropy_scores = []
        for i in range(0, ali_len):
            ali_col_i = list(alignment[:, i])
            # Exclude columns with too many gaps.
            if toss_gaps:
                gap_freq = ali_col_i.count("-")/float(len(ali_col_i))
                if gap_freq >= toss_gap_threshold:
                    # There are too many gaps.
                    entropy_scores.append(None)
                    continue
            # Exclude gap characters from the entropy calculations.
            if only_aa:
                ali_col_i = [p_ij for p_ij in ali_col_i if p_ij != "-"]
                # There is a gap-only column.
                if len(ali_col_i) == 0:
                    entropy_scores.append(None)
                    continue
            if entropy_func == "entropy":
                entropy_i = sum([self.get_information(aa, ali_col_i) for aa in self.amino_acids])
            elif entropy_func == "relative_entropy":
                entropy_i = sum([self.get_relative_information(aa, ali_col_i) for aa in self.amino_acids])
            entropy_scores.append(entropy_i)

        return entropy_scores

    def get_information(self, aa, ali_col, pseudo_count=0):
        """
        Returns the -p*log(p) Shannon term of amino acid 'aa' in alignment
        column 'ali_col' (0 if the amino acid does not occur in the column).
        """
        aa_counts = ali_col.count(aa)
        if aa_counts == 0:
            return 0
        else:
            aa_freq = (aa_counts + pseudo_count)/float(len(ali_col))
            return -aa_freq*math.log(aa_freq)

    def get_relative_information(self, aa, ali_col, pseudo_count=0):
        """
        Returns the p*log(p/q) relative-entropy term of amino acid 'aa' in
        alignment column 'ali_col', where q is the SwissProt background
        frequency of 'aa' (0 if the amino acid does not occur in the column).
        """
        aa_counts = ali_col.count(aa)
        if aa_counts == 0:
            return 0
        else:
            aa_freq = (aa_counts + pseudo_count)/float(len(ali_col))
            return aa_freq*math.log(aa_freq/swissprot_freq_dict[aa])

    def get_entropy_items_list(self, entropy_scores, entropy_func="entropy"):
        """
        Prepares dictionaries which will store the entropy score and bin for
        each residue.
        """
        # Tosses alignment positions with too many gaps.
        clist = np.array([i for i in entropy_scores if i is not None])
        # Relative-entropy values are negated so that low values always mean
        # low conservation when binning.
        if entropy_func == "relative_entropy":
            clist = -clist
        bins = np.linspace(min(clist), max(clist), num=self.n_bins+1)
        list_of_entropy_items = []
        for pos_idx, val in enumerate(entropy_scores):
            # Adds 'None' values for position tossed out because of their high
            # gap content.
            if val is None:
                list_of_entropy_items.append({"entropy-score": None, "interval": None})
            else:
                if entropy_func == "entropy":
                    b = self._get_bin(np.digitize(val, bins, right=True))
                elif entropy_func == "relative_entropy":
                    b = self._get_bin(np.digitize(-val, bins, right=True))
                list_of_entropy_items.append({"entropy-score": round(val, 3), "interval": b})
        return list_of_entropy_items

    def _get_bin(self, b):
        # Clamp the digitized index to the 1..n_bins range.
        if b <= 0:
            return 1
        else:
            return b


class Entropy_scorer_window_qt(PyMod_protocol_window_qt):
    """
    Options window for MSA entropy calculations.
    """

    def add_middle_frame_widgets(self):
        # Type of entropy measure.
        self.entropy_scorer_function_rds = PyMod_radioselect_qt(label_text="Entropy Measure",
                                                                buttons=self.protocol.entropy_func_labels)
        self.entropy_scorer_function_rds.setvalue(self.protocol.entropy_func_labels[0])
        self.middle_formlayout.add_widget_to_align(self.entropy_scorer_function_rds)

        # Toss gaps.
        self.entropy_scorer_exclude_gaps_rds = PyMod_radioselect_qt(label_text="Toss gaps",
                                                                    buttons=('Yes', 'No'))
        self.entropy_scorer_exclude_gaps_rds.setvalue("Yes")
        self.middle_formlayout.add_widget_to_align(self.entropy_scorer_exclude_gaps_rds)

        self.middle_formlayout.set_input_widgets_width("auto", padding=10)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/tree_building.py
.py
6,700
154
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

# TODO:
#   - add the possibility to save trees in the phylip format.

import os

from pymod_lib import pymod_vars
from pymod_lib.pymod_gui.shared_gui_components_qt import PyMod_protocol_window_qt, PyMod_radioselect_qt
from ._evolutionary_analysis_base import Evolutionary_analysis_protocol


# Maps the GUI labels to the internal clustering algorithm names.
tree_building_alg_dict = {"Neighbor Joining": "nj", "UPGMA": "upgma"}


class Tree_building(Evolutionary_analysis_protocol):
    """
    Class implementing a PyMod protocol to build a phylogenetic tree out of a
    multiple sequence alignment, using either ClustalW or MUSCLE.
    """

    def launch_from_gui(self):
        """
        It will check if a software to build a tree is available on the user's
        machine.
        """
        self.tree_building_software = None
        can_build_tree = False
        if self.pymod.clustalw.tool_file_exists():
            self.tree_building_software = "clustalw"
            can_build_tree = True
        elif self.pymod.muscle.tool_file_exists():
            self.tree_building_software = "muscle"
            can_build_tree = True
        if can_build_tree:
            self.build_tree_building_window()
        else:
            title = "Tree building Error"
            message = "In order to build a tree out of an alignment you need to install either ClustalW or MUSCLE and specify an existent executable path of one of these tools in the PyMod Options Window."
            self.pymod.main_window.show_error_message(title, message)

    def check_tree_constructor_module(self):
        """
        Returns True if Biopython's tree construction module can be imported.
        """
        try:
            import Bio.Phylo.TreeConstruction
            return True
        except ImportError:
            return False

    def build_tree_building_window(self):
        """
        Builds a window with options to build a tree out of an alignment.
        """
        self.tree_building_window = Tree_building_window_qt(self.pymod.main_window,
            protocol=self,
            title="Options for Tree Building",
            upper_frame_title="Here you can modify options for Tree Building",
            submit_command=self.run_tree_building_software)
        self.tree_building_window.show()

    def run_tree_building_software(self):
        """
        Builds the command line for the selected tree-building tool (ClustalW
        or MUSCLE), runs it on a temporary FASTA alignment file, and displays
        the resulting tree.
        """
        # Saves a temporary input alignment file.
        alignment_file_name = "alignment_tmp"
        alignment_file_path = os.path.join(self.pymod.alignments_dirpath, alignment_file_name + '.fasta')
        self.pymod.save_alignment_fasta_file(alignment_file_name, self.input_cluster_element.get_children())

        # Get the parameters from the GUI.
        clustering_algorithm = self.get_clustering_algorithm()

        # Prepares to run the tree-building algorithm.
        commandline = ""
        output_file_path = None

        if self.tree_building_software == "clustalw":
            commandline = '"%s"' % (self.pymod.clustalw["exe_file_path"].get_value())
            commandline += ' -TREE -INFILE="%s"' % (alignment_file_path)
            commandline += ' -OUTPUTTREE=phylip'
            if self.get_distance_correction_val():
                commandline += ' -KIMURA'
            if self.get_exclude_gaps_val():
                commandline += ' -TOSSGAPS'
            # if self.get_boostrap_val():
            #     commandline += ' -SEED='+str(random.randint(0,1000))
            #     commandline += ' -BOOTLABELS=node'
            if clustering_algorithm == "nj":
                commandline += ' -CLUSTERING=NJ'
            elif clustering_algorithm == "upgma":
                commandline += ' -CLUSTERING=UPGMA'
            output_file_path = os.path.join(self.pymod.alignments_dirpath, alignment_file_name + '.ph')

        elif self.tree_building_software == "muscle":
            commandline = '"%s"' % (self.pymod.muscle["exe_file_path"].get_value())
            # Fixed: quote the input/output file paths (as the ClustalW branch
            # already does), so paths containing whitespace do not break the
            # command line.
            commandline += ' -maketree -in "%s"' % (alignment_file_path)
            output_file_path = os.path.join(self.pymod.alignments_dirpath, alignment_file_name + '.phy')
            commandline += ' -out "%s"' % (output_file_path)
            if clustering_algorithm == "nj":
                commandline += ' -cluster neighborjoining'
            elif clustering_algorithm == "upgma":
                # UPGMA is MUSCLE's default clustering method.
                pass

        # Actually runs the tree building algorithm.
        self.pymod.execute_subprocess(commandline)

        # Remove temporary files.
        new_tree_file_path = os.path.join(self.pymod.alignments_dirpath,
            "%s_%s_align_tree.phy" % (self.pymod.alignments_files_names, self.input_cluster_element.unique_index))
        os.rename(output_file_path, new_tree_file_path)
        os.remove(alignment_file_path)
        self.tree_building_window.destroy()

        # Reads the output tree file with Phylo and displays its content using
        # PyMod plotting engine.
        self.pymod.show_tree(new_tree_file_path)

    def get_clustering_algorithm(self):
        return tree_building_alg_dict[self.tree_building_window.algorithm_rds.getvalue()]

    def get_boostrap_val(self):
        return pymod_vars.yesno_dict[self.tree_building_window.bootstrap_rds.getvalue()]

    def get_distance_correction_val(self):
        return pymod_vars.yesno_dict[self.tree_building_window.distance_correction_rds.getvalue()]

    def get_exclude_gaps_val(self):
        return pymod_vars.yesno_dict[self.tree_building_window.exclude_gaps_rds.getvalue()]


class Tree_building_window_qt(PyMod_protocol_window_qt):
    """
    Options window for tree building.
    """

    def build_protocol_middle_frame(self):
        # Add some options.
        self.algorithm_rds = PyMod_radioselect_qt(label_text="Clustering Algorithm",
                                                  buttons=list(sorted(tree_building_alg_dict.keys())))
        self.algorithm_rds.setvalue("Neighbor Joining")
        self.middle_formlayout.add_widget_to_align(self.algorithm_rds)

        if self.protocol.tree_building_software == "clustalw":
            # Kimura distance correction.
            self.distance_correction_rds = PyMod_radioselect_qt(label_text="Use Distance Correction",
                                                                buttons=('Yes', 'No'))
            self.distance_correction_rds.setvalue("No")
            self.middle_formlayout.add_widget_to_align(self.distance_correction_rds)

            # Toss gaps.
            self.exclude_gaps_rds = PyMod_radioselect_qt(label_text="Exclude Gaps",
                                                         buttons=('Yes', 'No'))
            self.exclude_gaps_rds.setvalue("No")
            self.middle_formlayout.add_widget_to_align(self.exclude_gaps_rds)

        self.middle_formlayout.set_input_widgets_width("auto", padding=10)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/campo.py
.py
17,460
387
# Copyright 2020 by Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

import os

from Bio import SeqIO
import numpy as np

from pymod_lib.pymod_vars import dict_of_matrices
from pymod_lib.pymod_seq import seq_manipulation
from pymod_lib.pymod_protocols.evolutionary_analysis_protocols._evolutionary_analysis_base import Evolutionary_analysis_protocol
from pymod_lib import pymod_vars
from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt,
                                                          PyMod_entryfield_qt,
                                                          PyMod_radioselect_qt,
                                                          PyMod_combobox_qt)


class CAMPO_analysis(Evolutionary_analysis_protocol):
    """
    Class implementing a PyMod protocol to score the conservation of a multiple
    sequence alignment using the CAMPO algorithm (see the 'CAMPO' class below).
    """

    # Substitution matrices selectable in the GUI (labels) and the mapping to
    # the internal names used by the 'CAMPO' class.
    campo_matrices = ["Blosum90","Blosum80","Blosum62","Blosum50","Blosum45","PAM30","PAM120","PAM250"]
    campo_matrices_dict = {"Blosum62": "blosum62", "Blosum90": "blosum90",
                           "Blosum80": "blosum80", "Blosum50": "blosum50",
                           "Blosum45": "blosum45",
                           "PAM30": "pam30", "PAM120": "pam120",
                           "PAM250": "pam250"}

    def launch_from_gui(self):
        self.build_campo_window()

    def build_campo_window(self):
        """
        Builds a window with options for the CAMPO algorithm.
        """
        self.campo_window = CAMPO_options_window_qt(self.pymod.main_window,
            protocol=self,
            title="CAMPO algorithm options",
            upper_frame_title="Here you can modify options for CAMPO",
            submit_command=self.campo_state)
        self.campo_window.show()

    def campo_state(self):
        """
        Called when the "SUBMIT" button is pressed on the CAMPO window.
        Contains the code to compute CAMPO scores using the 'CAMPO' class.
        """
        input_from_gui, input_error = self.campo_window.validate_input()
        if input_error is not None:
            self.pymod.main_window.show_error_message("Input Error", input_error)
            return None

        # Saves a .fasta file for the alignment.
        aligned_sequences = self.input_cluster_element.get_children()
        self.pymod.save_alignment_fasta_file("temp", aligned_sequences)
        input_file_shortcut = os.path.join(self.pymod.alignments_dirpath, "temp.fasta")

        # Computes CAMPO scores by using the campo module.
        cbc = CAMPO(input_file_shortcut, **input_from_gui)
        cbc.compute_id_matrix()
        try:
            cbc.run_CAMPO()
            # Gets the list of CAMPO score. There are as many values as
            # positions in the alignment.
            campo_list = cbc.get_campo_items_list()
            # Assigns CAMPO scores to each one of the aligned sequences.
            # 'rc' tracks the index of the next non-gap (polymer) residue.
            for seq in aligned_sequences:
                residues = seq.get_polymer_residues()
                rc = 0
                for (r,v) in zip(seq.my_sequence, campo_list):
                    if r != "-":
                        residues[rc].campo_score = v
                        rc += 1
                seq._has_campo_scores = True
                self.pymod.main_window.color_element_by_campo_scores(seq)
        except Exception as e:
            self.pymod.main_window.show_error_message("CAMPO Error", "Could not compute CAMPO scores because of the following error: '%s'.)" % e)

        # Removes the temporary alignment file.
        os.remove(input_file_shortcut)
        self.campo_window.destroy()


class CAMPO:
    """
    A class to analyze a protein multiple alignment using the CAMPO algorithm
    first described in:
        - Paiardini A1, Bossa F, Pascarella S., Nucleic Acids Res. 2005 Jul 1;33(Web Server issue):W50-5.
          CAMPO, SCR_FIND and CHC_FIND: a suite of web tools for computational structural biology.
    """

    def __init__(self, fasta_file_full_path, mutational_matrix = "blosum62",
                 gap_score=-1, gap_gap_score=0,
                 toss_gaps=True):
        """
        Takes as an argument the file path of the .fasta format alignment file.

        'mutational_matrix': internal name of the substitution matrix to use.
        'gap_score': score assigned to residue-gap pairs.
        'gap_gap_score': score assigned to gap-gap pairs.
        'toss_gaps': if True, columns with too many gaps get no CAMPO score.
        """
        self.fasta_file_full_path = fasta_file_full_path
        self.records = list(SeqIO.parse(self.fasta_file_full_path, "fasta"))
        self.num_seq= len(self.records)
        self.sequence_list=[]
        for seq_record in self.records:
            sequence = str(seq_record.seq)
            self.sequence_list.append(sequence)

        # Test that all sequences have the same length.
        same_length = None
        if len(set([len(s) for s in self.sequence_list])) == 1:
            same_length = True
            self.alignment_length = len(self.sequence_list[0])
        else:
            same_length = False
        if not same_length:
            raise Exception("The aligned sequences do not have the same length.")

        # Prepares the substitution matrix.
        self.mutational_matrix = None
        if mutational_matrix == "blosum62":
            self.mutational_matrix = dict_of_matrices["BLOSUM62"]
        elif mutational_matrix == "blosum90":
            self.mutational_matrix = dict_of_matrices["BLOSUM90"]
        elif mutational_matrix == "blosum80":
            self.mutational_matrix = dict_of_matrices["BLOSUM80"]
        elif mutational_matrix == "blosum50":
            self.mutational_matrix = dict_of_matrices["BLOSUM50"]
        elif mutational_matrix == "blosum45":
            self.mutational_matrix = dict_of_matrices["BLOSUM45"]
        elif mutational_matrix == "pam250":
            self.mutational_matrix = dict_of_matrices["PAM250"]
        elif mutational_matrix == "pam120":
            self.mutational_matrix = dict_of_matrices["PAM120"]
        elif mutational_matrix == "pam30":
            self.mutational_matrix = dict_of_matrices["PAM30"]
        # Copy so the shared module-level matrix is not mutated below.
        self.mutational_matrix = self.mutational_matrix.copy()

        # Completes the "other half" of the Biopython matrix, so both (a, b)
        # and (b, a) can be looked up.
        for pair in list(self.mutational_matrix.keys()):
            value = self.mutational_matrix[pair]
            reversed_pair = (pair[1], pair[0])
            self.mutational_matrix.update({reversed_pair: value})

        # Adds values for X.
        for r in ("C","S","T","P","A","G","N","D","E","Q","H","R","K","M","I","L","V","F","Y","W","X"):
            x_value = -1
            self.mutational_matrix.update({(r,"X"): x_value, ("X",r): x_value})

        # Adds items for residue-gaps pairs.
        for r in ("C","S","T","P","A","G","N","D","E","Q","H","R","K","M","I","L","V","F","Y","W","X"):
            self.mutational_matrix.update({(r,"-"): gap_score, ("-",r): gap_score})

        # Adds values for gap-gap pair.
        self.mutational_matrix.update({("-","-"): gap_gap_score})

        # If this variable is set to True, positions with too many gaps will
        # not be assigned with a CAMPO score.
        self.toss_gaps = toss_gaps
        self.toss_gap_threshold = 0.25

    def compute_id_matrix(self):
        """
        Compute the identity matrix, necessary to calculate CAMPO scores. This
        should be called just after an object of this class is built.
        """
        self.id_matrix = []
        for i in range (self.num_seq-1):
            self.id_matrix.append([])
        for i in range (0, self.num_seq-1):
            for j in range (i+1, self.num_seq):
                identity = seq_manipulation.compute_sequence_identity(self.sequence_list[i], self.sequence_list[j])
                self.id_matrix[i].append(identity)

    def run_CAMPO(self):
        """
        Actually runs the CAMPO algorithm and stores the conservation scores
        for each column of the multiple alignment.
        """

        #############################
        # Italian code starts here. #
        #############################
        # NOTE: the original Italian comments of this section have been
        # translated to English. Variable names are kept in Italian:
        # 'matrice_somme' = sums matrix, 'denominatore' = denominator,
        # 'numeratore' = numerator, 'indice' = index, 'lista' = list,
        # 'somma' = sum, 'valore' = value, 'massimo' = maximum.

        self.matrice_somme=[]
        for i in range(0, self.num_seq-1):
            self.matrice_somme.append([])

        # Creates a 'denominatore' variable which will hold the summation of
        # all the (1 - id_matrix[i][indice]) values.
        denominatore= 0.0

        # This nested loop computes the numerator of the fraction contained in
        # the CAMPO algorithm. It also adds the (1 - id_matrix[i][indice])
        # value to the denominator.
        for i in range (0, self.num_seq-1):
            # This index will be used to fetch the right % identity from the
            # identity matrix.
            indice=0
            for j in range (i+1, self.num_seq):
                # At the end of the loop, 'lista' will contain, in order, the
                # values obtained by comparing the first amino acid of
                # sequence i with the first one of sequence j, the second AA
                # of i with the second AA of j, and so on...
                self.lista=[]
                # This loop compares each amino acid of sequence i with the
                # corresponding amino acid of sequence j. It computes
                # Bscorek(ij) and divides it by (|Bscorek(ii)|+|Bscorek(jj)|)*(1/2).
                # get_match_score((AAi, AAj)) returns the substitution score
                # of the AAi --> AAj exchange. This result is multiplied by
                # 1 - %identity(ij) and then appended to 'lista'.
                for AAi, AAj in zip (self.sequence_list[i],self.sequence_list[j]):
                    if AAi != '-' or AAj != '-':
                        numeratore=0
                        blosum_term = (abs(self.get_match_score((AAi,AAi)))+abs(self.get_match_score((AAj,AAj)))) * float(0.5)
                        try:
                            numeratore= self.get_match_score((AAi,AAj)) / (blosum_term) * (1-self.id_matrix[i][indice])
                        except Exception:
                            numeratore= self.get_match_score((AAi,AAj)) / (1) * (1-self.id_matrix[i][indice])
                        self.lista.append(round(float(numeratore),2))
                    else:
                        numeratore=self.get_match_score((AAi,AAi))
                        self.lista.append(round(float(numeratore),2))
                denominatore= denominatore+(1-self.id_matrix[i][indice])
                self.matrice_somme[i].append(self.lista)
                indice=indice+1

        # This check avoids a 0/0 division in the case in which all the
        # sequences are identical. Ok will be zero for every column of the
        # alignment anyway, since all the values in the numerator of the
        # summation are multiplied by 1 - id_matrix[i][indice], which is zero
        # for identical sequences.
        if denominatore == 0:
            denominatore= 0.01

        # At the end of the loop above, the first element of 'matrice_somme'
        # is the matrix with the values obtained by comparing the first
        # sequence with the second, then with the third, and so on. The second
        # element is the matrix with the comparisons of the second sequence
        # with the third, then with the fourth, and so on...
        # This loop computes the summation in the numerator of the CAMPO
        # algorithm. The outermost loop (i) goes through the columns to sum.
        # The intermediate one (z) selects which of the matrices contained in
        # 'matrice_somme' is being considered, while loop (j) identifies the
        # rows of that matrix.
        self.lista_somme_colonne=[]
        for i in range (0,len(self.sequence_list[1])):
            somma=0
            for z in range(0, len(self.matrice_somme)):
                for j in range (0, len(self.matrice_somme[z])):
                    somma=somma+self.matrice_somme[z][j][i]
            self.lista_somme_colonne.append(somma)

        # This loop computes the Ok scores assigned to each column K of the
        # alignment. 'campo_scores' is the list that will contain, in order,
        # all the scores assigned to the alignment columns.
        self.campo_scores=[]
        for somma_colonna in self.lista_somme_colonne:
            valore = (1 / (self.num_seq*(self.num_seq-1)*float(0.5))) * float(somma_colonna) / float(denominatore)
            self.campo_scores.append(valore)

        # The fraction of gaps in a column of a multiple alignment in order
        # for it to not be assigned a normalized CAMPO score.
        self.tossed_alignment_positions = []
        if self.toss_gaps:
            for alignment_position in range(self.alignment_length):
                gap_count = 0
                for seq in self.sequence_list:
                    if seq[alignment_position] == "-":
                        gap_count += 1
                gap_fraction = float(gap_count)/float(self.num_seq)
                if gap_fraction >= self.toss_gap_threshold:
                    self.tossed_alignment_positions.append(alignment_position)
                    self.campo_scores[alignment_position] = None

        # Normalize on the maximum value. Tosses alignment positions with many
        # gaps.
        campo_scores_without_none_values = [i for i in self.campo_scores if i is not None]
        if len(campo_scores_without_none_values) > 0:
            massimo = max(campo_scores_without_none_values)
        else:
            raise ValueError("All the colums have high levels of gaps (> %s)." % self.toss_gap_threshold)
        # massimo = max(self.campo_scores) # in python 3, this line generates a TypeError:
        # '>' not supported between instances of 'NoneType' and 'NoneType'

        self.normalized_campo_scores = []
        for score in self.campo_scores:
            if score != None:
                self.normalized_campo_scores.append(score/massimo)
            else:
                self.normalized_campo_scores.append(None)

        ###########################
        # Italian code ends here. #
        ###########################

        return True

    def get_match_score(self,residues_pair):
        """
        Returns the substitution-matrix score for a pair of residues (0 if the
        pair is not present in the matrix).
        """
        score = None
        try:
            score = self.mutational_matrix[residues_pair]
        except KeyError:
            score = 0
        return score

    # Number of bins used when assigning CAMPO intervals for coloring.
    n_bins = 10

    def get_campo_items_list(self):
        """
        Returns a list with one dictionary per alignment position, storing the
        normalized CAMPO score and its bin ('None' for tossed positions).
        """
        # Tosses alignment positions with too many gaps.
        clist = np.array([x for x in self.normalized_campo_scores if x != None])
        bins = np.linspace(min(clist), max(clist), num=self.n_bins+1)
        inds = np.digitize(clist, bins, right=True)
        list_of_campo_items = []
        for campo_score, bin_id in zip(clist, inds):
            list_of_campo_items.append({"campo-score": round(campo_score, 3), "interval": self._get_bin(bin_id)})
        # Adds back 'None' values for position tossed out because of their
        # high gap content.
        for tossed_position in self.tossed_alignment_positions:
            list_of_campo_items.insert(tossed_position, {"campo-score": None, "interval": None})
        return list_of_campo_items

    def _get_bin(self, b):
        # Clamp the digitized index to the 1..n_bins range.
        if b <= 0:
            return 1
        else:
            return b


class CAMPO_options_window_qt(PyMod_protocol_window_qt):
    """
    Window for CAMPO options.
    """

    def add_middle_frame_widgets(self):
        # Scoring matrix combobox.
        self.matrix_cbx = PyMod_combobox_qt(label_text="Scoring Matrix Selection",
                                            items=self.protocol.campo_matrices)
        self.matrix_cbx.combobox.setCurrentIndex(2)
        self.middle_formlayout.add_widget_to_align(self.matrix_cbx)

        # Gap open entryfield.
        self.campo_gap_penalty_enf = PyMod_entryfield_qt(label_text="Gap Score",
                                                         value="-1",
                                                         validate={'validator': 'integer',
                                                                   'min': -1000, 'max': 0})
        self.middle_formlayout.add_widget_to_align(self.campo_gap_penalty_enf)

        # Gap extension entryfield.
        self.campo_gap_to_gap_score_enf = PyMod_entryfield_qt(label_text="Gap to Gap Score",
                                                              value="0",
                                                              validate={'validator': 'integer',
                                                                        'min': -1000, 'max': 0})
        self.middle_formlayout.add_widget_to_align(self.campo_gap_to_gap_score_enf)

        # Toss gaps.
        self.campo_exclude_gaps_rds = PyMod_radioselect_qt(label_text="Toss gaps",
                                                           buttons=('Yes', 'No'))
        self.campo_exclude_gaps_rds.setvalue("Yes")
        self.middle_formlayout.add_widget_to_align(self.campo_exclude_gaps_rds)

        self.middle_formlayout.set_input_widgets_width("auto", padding=10)

    def validate_input(self):
        """
        Collects and validates the GUI parameters. Returns a (params, error)
        tuple: 'params' is a keyword dictionary for the 'CAMPO' class (or None
        on failure), 'error' is an error string (or None on success).
        """
        params_from_gui = {}
        try:
            params_from_gui["mutational_matrix"] = self.protocol.campo_matrices_dict[self.matrix_cbx.get()]
            params_from_gui["gap_score"] = self.campo_gap_penalty_enf.getvalue(validate=True)
            params_from_gui["gap_gap_score"] = self.campo_gap_to_gap_score_enf.getvalue(validate=True)
            params_from_gui["toss_gaps"] = pymod_vars.yesno_dict[self.campo_exclude_gaps_rds.getvalue()]
        except (ValueError, KeyError) as e:
            return None, str(e)
        return params_from_gui, None
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/espript.py
.py
5,680
120
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved.
# This code is part of the PyMod package and governed by its license. Please
# see the LICENSE file that should have been included as part of this package
# or the main __init__.py file in the pymod3 folder.

import webbrowser

from pymod_lib.pymod_gui.shared_gui_components_qt import (PyMod_protocol_window_qt,
                                                          PyMod_combobox_qt)
from ._evolutionary_analysis_base import Evolutionary_analysis_protocol
from ._web_services_common import Web_services_common


class ESPript_analysis(Evolutionary_analysis_protocol, Web_services_common):
    """
    Class implementing methods for accessing the ESPript web service.
    """

    def launch_from_gui(self):
        self.espript()

    def espript(self):
        '''
        Opens in the default browser the ESPript page, with the current alignment pre-loaded.
        Requires active Internet connection. It needs also the Schubert server to be reachable.
        '''
        # A list of the header names of those aligned sequences with an
        # associated 3D structure (index 0 is the "None" placeholder, so both
        # lists are parallel and length > 1 means at least one structure).
        self.espript_structures_list = ["None"]
        self.espript_structure_elements_list = [None]
        for structure_element in [e for e in self.input_cluster_element.get_children() if e.has_structure() and e.polymer_type == "protein"]:
            self.espript_structures_list.append(structure_element.my_header)
            self.espript_structure_elements_list.append(structure_element)
        # With no structures there is nothing to choose: submit directly.
        if len(self.espript_structures_list) <= 1:
            self.espript_state()
        else:
            self.show_espript_window()

    def show_espript_window(self):
        """
        Displayes a window with a combobox to let users select a strucure file
        of which the secondary structure information will be included in
        ESPript output.
        """
        self.espript_sec_str_window = Espript_options_window_qt(self.pymod.main_window,
            protocol=self,
            title="ESPript Options",
            upper_frame_title="Here you can modify options for ESPript",
            submit_command=self.espript_state)
        self.espript_sec_str_window.show()

    def espript_state(self):
        """
        Uploads a sequence alignment file in fasta format on schubert (and
        optionally a structure file in the pdb format) and then opens a new
        tab on users' web browser with the ESPript page with the fasta (and
        the pdb) uploaded files a input.
        """
        schubert_url = 'http://schubert.bio.uniroma1.it/uploader/php_upload.php'
        schubert_folder_url = 'http://schubert.bio.uniroma1.it/uploader/uploads/'
        espript_basic_url = 'http://espript.ibcp.fr/ESPript/cgi-bin/ESPript.cgi?FRAMES=YES&amp;alnfile0='

        selected_structure_element = None
        if len(self.espript_structures_list) > 1:
            structure_index = self.espript_sec_str_window.espript_sec_str_combobox.get_index()
            selected_structure_element = self.espript_structure_elements_list[structure_index]

        if selected_structure_element != None:
            upload_response = self.upload_alignment(self.input_cluster_element, schubert_url, 'sequences_file',
                                                    structure_element = selected_structure_element)
        else:
            upload_response = self.upload_alignment(self.input_cluster_element, schubert_url, 'sequences_file')

        if self.verbose:
            print('- Attempting to upload...')

        if len(self.espript_structures_list) > 1:
            self.espript_sec_str_window.destroy()

        # Checks if the upload is successful.
        if upload_response:
            # urllib returns the response body as bytes: decode it here,
            # otherwise the 'startswith' call below would raise a TypeError
            # ("startswith first arg must be bytes or a tuple of bytes, not
            # str").
            upload_response = upload_response.decode('utf-8')
        else:
            return

        # The server replies with 'TRUE: <uploaded file name(s)>' on success.
        if upload_response.startswith('TRUE'):
            if selected_structure_element == None:
                uploaded_alignment_file = upload_response[6:]
            else:
                uploaded_alignment_file, uploaded_structure_file= upload_response[6:].split(",")
            espript_url = espript_basic_url+schubert_folder_url+uploaded_alignment_file # Creates the URL.
            if selected_structure_element != None:
                espript_url += ";struct1file0=%s%s" % (schubert_folder_url, uploaded_structure_file)
                espript_url += ";struct1chain0=%s" % (selected_structure_element.get_chain_id())
            webbrowser.open(espript_url) # Opens the URL.
            if self.verbose:
                print('- Done')
        else:
            title = "Error"
            message = "Error while uploading the file. Please try again later or check your Internet connection."
            self.pymod.main_window.show_error_message(title, message)


class Espript_options_window_qt(PyMod_protocol_window_qt):
    """
    Options window to select the structure whose secondary structure will be
    shown in the ESPript output.
    """

    def add_middle_frame_widgets(self):
        # Secondary structure combobox.
        self.espript_sec_str_combobox = PyMod_combobox_qt(label_text="Show Secondary Structure of",
                                                          items=self.protocol.espript_structures_list)
        self.espript_sec_str_combobox.combobox.setCurrentIndex(0)
        self.middle_formlayout.add_widget_to_align(self.espript_sec_str_combobox)
        self.middle_formlayout.set_input_widgets_width(200)
Python
3D
pymodproject/pymod
pymod3/pymod_lib/pymod_protocols/evolutionary_analysis_protocols/_web_services_common.py
.py
4,031
80
# Copyright 2020 by Maria Giulia Prado, Giacomo Janson. All rights reserved. # This code is part of the PyMod package and governed by its license. Please # see the LICENSE file that should have been included as part of this package # or the main __init__.py file in the pymod3 folder. import os import urllib.request, urllib.parse, urllib.error ################################################################################################### # WEB SERVICES. # ################################################################################################### class Web_services_common: verbose = False ################################################################# # Common methods for interacting with web services. # ################################################################# def upload_alignment(self, alignment_element, url, form_upload_file_name, structure_element = None, other_values={}, show_error=False): ''' This function creates a POST request to the URL 'url'. The 'form_upload_file_name' argument is the name of the form field that encodes the file to be uploaded. For instance: if in the upload form the field of the file is called "sequence_file", the 'form_upload_file_name' argument has to be set to 'sequence_file'. It's equivalent to the 'name' variable of the UNIX command curl: curl --form name=@content The function saves the current alignment and sends it to the server. It may also send other data, encoded in 'other_values' dictionary (a dictionary containing the parameters normally sent by compiling a form in the HTML page). This argument is optional and by default is an empty dictionary. Returns the response given by the server as a string. 
''' response_content = '' #Saves alignment in FASTA format alignment_file_name = 'alignment_tmp' self.pymod.save_alignment_fasta_file(alignment_file_name, alignment_element.get_children(), first_element=structure_element) alignment_file_path = os.path.join(self.pymod.alignments_dirpath, alignment_file_name + '.fasta') #Copy file content to a string al_file = open(alignment_file_path) alignment_string = al_file.read() al_file.close() os.remove(alignment_file_path) values = {form_upload_file_name: alignment_string} # Adds other values to the url. if other_values: values.update(other_values) # Uploads also a structure file. if structure_element != None: structure_file = open(os.path.join(self.pymod.structures_dirpath, structure_element.get_structure_file())) structure_file_string = structure_file.read() dbref_line = "DBREF %s" % (structure_element.my_header).ljust(80, " ") structure_file_string = dbref_line + "\n" + structure_file_string structure_file.close() values.update({"structure_file": structure_file_string}) user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8)' headers = {'User-Agent': user_agent} try: #Creates a request data = urllib.parse.urlencode(values).encode("utf-8") # in python 3, params output from urlencode # must be encoded to bytes with 'encode' method before it is sent to urlopen as data req = urllib.request.Request(url, data, headers=headers) #Gets server response and reads it response = urllib.request.urlopen(req) response_content = response.read() except urllib.error.URLError: response_content = '' title = "Connection Error" message = "Can not access the server.\nPlease check your Internet access." self.pymod.main_window.show_error_message(title, message) return response_content
Python
3D
JuliaHealth/BloodFlowTrixi.jl
exemples/Model1DOrd2/exemple.jl
.jl
1,189
49
using Trixi using BloodFlowTrixi using OrdinaryDiffEq eq = BloodFlowEquations1D(; h = 0.1) eq_ord2 = BloodFlowEquations1DOrd2(eq) mesh = TreeMesh(0.0, 40.0, initial_refinement_level = 4, n_cells_max = 10^4, periodicity = false) bc_hypo = (; x_neg = boundary_condition_pressure_in, x_pos = Trixi.BoundaryConditionDoNothing() ) bc_parab = (; x_neg = BoundaryConditionNeumann((x,t,eq) -> SVector(0.0,0,0,0,0)), x_pos = BoundaryConditionNeumann((x,t,eq) -> SVector(0.0,0,0,0,0)) ) solver = DGSEM(polydeg = 2, surface_flux = (flux_lax_friedrichs,flux_nonconservative), volume_integral = VolumeIntegralFluxDifferencing((flux_lax_friedrichs,flux_nonconservative)) ) semi = SemidiscretizationHyperbolicParabolic( mesh, (eq,eq_ord2), initial_condition_simple, source_terms = source_term_simple_ord2, solver, boundary_conditions = (bc_hypo,bc_parab) ) tspan = (0.0, 0.3) ode = semidiscretize(semi, tspan) dt_adapt = StepsizeCallback(;cfl=0.5) analyse = AliveCallback( alive_interval = 10, analysis_interval = 100 ) cb = CallbackSet( dt_adapt,analyse ) sol = solve(ode, SSPRK33(),dt = dt_adapt(ode),callback= cb)
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
exemples/Model1D/exemple.jl
.jl
945
43
using Trixi using BloodFlowTrixi using OrdinaryDiffEq eq = BloodFlowEquations1D(; h = 0.1) mesh = TreeMesh(0.0, 40.0, initial_refinement_level = 4, n_cells_max = 10^4, periodicity = false) bc = (; x_neg = boundary_condition_pressure_in, x_pos = Trixi.BoundaryConditionDoNothing() ) solver = DGSEM(polydeg = 2, surface_flux = (flux_lax_friedrichs,flux_nonconservative), volume_integral = VolumeIntegralFluxDifferencing((flux_lax_friedrichs,flux_nonconservative)) ) semi = SemidiscretizationHyperbolic( mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions = bc ) tspan = (0.0, 0.3) ode = semidiscretize(semi, tspan) dt_adapt = StepsizeCallback(;cfl=0.5) analyse = AliveCallback( alive_interval = 10, analysis_interval = 100 ) cb = CallbackSet( dt_adapt,analyse ) sol = solve(ode, SSPRK33(),dt = dt_adapt(ode),callback= cb)
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
exemples/Model2D/exemple.jl
.jl
1,020
47
using Trixi using BloodFlowTrixi using OrdinaryDiffEq eq = BloodFlowEquations2D(; h = 0.1) mesh = P4estMesh( (1,2), polydeg = 1, periodicity = (true,false), coordinates_min = (0.0,0.0), coordinates_max = (2*pi,40.0), initial_refinement_level = 4 ) bc = Dict( :y_neg => boundary_condition_pressure_in, :y_pos => Trixi.BoundaryConditionDoNothing() ) solver = DGSEM(polydeg = 1, surface_flux = (flux_lax_friedrichs,flux_nonconservative), volume_integral = VolumeIntegralFluxDifferencing((flux_lax_friedrichs,flux_nonconservative)) ) semi = SemidiscretizationHyperbolic( mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions = bc ) tspan = (0.0, 0.3) ode = semidiscretize(semi, tspan) dt_adapt = StepsizeCallback(;cfl=0.5) analyse = AliveCallback( alive_interval = 10, analysis_interval = 100 ) cb = CallbackSet( dt_adapt,analyse ) sol = solve(ode, SSPRK33(),dt = dt_adapt(ode),callback= cb)
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
exemples/Model2D/diexemple.jl
.jl
1,441
61
using Trixi using OrdinaryDiffEq using DataInterpolations using BloodFlowTrixi using StaticArrays, LinearAlgebra using QuadGK using LinearAlgebra eq = BloodFlowEquations2D(; h = 0.1) xyz_data = [SA[cos(0.2*si),sin(0.2*si),si] for si in range(0,40,100)] curve = interpolate_curve(xyz_data) L = curve.t[end-1] println("curve length : $L") BloodFlowTrixi.curvature(s) = norm(DataInterpolations.derivative(curve,s,2)) mesh = P4estMesh( (1,2), polydeg = 1, periodicity = (true,false), coordinates_min = (0.0,0.0), coordinates_max = (2*pi,L), initial_refinement_level = 4 ) bc = Dict( :y_neg => boundary_condition_pressure_in, :y_pos => Trixi.BoundaryConditionDoNothing() ) solver = DGSEM(polydeg = 1, surface_flux = (flux_lax_friedrichs,flux_nonconservative), volume_integral = VolumeIntegralFluxDifferencing((flux_lax_friedrichs,flux_nonconservative)) ) semi = SemidiscretizationHyperbolic( mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions = bc ) tspan = (0.0, 0.3) ode = semidiscretize(semi, tspan) dt_adapt = StepsizeCallback(;cfl=0.5) analyse = AliveCallback( alive_interval = 10, analysis_interval = 100 ) cb = CallbackSet( dt_adapt,analyse ) sol = solve(ode, SSPRK33(),dt = dt_adapt(ode),callback= cb,saveat = 0.03,save_everystep = false) # artery center-line res = get3DData(eq,xyz_data,semi,sol,1)
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/BloodFlowTrixi.jl
.jl
1,243
23
# should you ask why the last line of the docstring looks like that: # it will show the package path when help on the package is invoked like help?> BloodFlowTrixi # but will interpolate to an empty string on CI server, preventing appearing the path in the documentation built there """ Package BloodFlowTrixi v$(pkgversion(BloodFlowTrixi)) This package implements 1D and 2D blood flow models for arterial circulation using Trixi.jl, enabling efficient numerical simulation and analysis. Docs under https://yolhan83.github.io/BloodFlowTrixi.jl $(isnothing(get(ENV, "CI", nothing)) ? ("\n" * "Package local path: " * pathof(BloodFlowTrixi)) : "") """ module BloodFlowTrixi using Trixi,WriteVTK,StaticArrays # Write your package code here. abstract type AbstractBloodFlowEquations{NDIMS, NVARS} <:Trixi.AbstractEquations{NDIMS, NVARS} end include("1DModel/1dmodel.jl") include("2DModel/2dmodel.jl") export BloodFlowEquations1D,BloodFlowEquations1DOrd2, BloodFlowEquations2D,flux_nonconservative,source_term_simple,source_term_simple_ord2,boundary_condition_pressure_in,initial_condition_simple,friction,pressure,radius,boundary_condition_outflow,boundary_condition_slip_wall,get3DData,interpolate_curve end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/bc2d.jl
.jl
3,749
81
@doc raw""" boundary_condition_outflow(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D) Applies an outflow boundary condition for the 2D blood flow model without reflecting any flux. ### Parameters - `u_inner`: Inner state vector at the boundary. - `orientation_or_normal`: Orientation index or normal vector indicating the boundary direction. - `direction`: Index indicating the spatial direction (1 for \( \theta \)-direction, otherwise \( s \)-direction). - `x`: Position vector at the boundary. - `t`: Time value. - `surface_flux_function`: Function to compute the surface flux. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Boundary flux as an `SVector`. """ function boundary_condition_outflow(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D) # Calculate the boundary flux without reflection flux1 = surface_flux_function[1](u_inner, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_inner, orientation_or_normal, eq) return flux1,flux2 end @doc raw""" boundary_condition_outflow(u_inner, orientation_or_normal, x, t, surface_flux_function, eq::BloodFlowEquations2D) Applies an outflow boundary condition for the 2D blood flow model without reflecting any flux. This version does not use a specific direction parameter. ### Parameters - `u_inner`: Inner state vector at the boundary. - `orientation_or_normal`: Orientation index or normal vector indicating the boundary direction. - `x`: Position vector at the boundary. - `t`: Time value. - `surface_flux_function`: Function to compute the surface flux. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Boundary flux as an `SVector`. 
""" function boundary_condition_outflow(u_inner, orientation_or_normal, x, t, surface_flux_function, eq::BloodFlowEquations2D) # Calculate the boundary flux without reflection flux1 = surface_flux_function[1](u_inner, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_inner, orientation_or_normal, eq) return flux1,flux2 end @doc raw""" boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D) Applies a slip-wall boundary condition for the 2D blood flow model by reflecting the normal component of the velocity at the boundary. ### Parameters - `u_inner`: Inner state vector at the boundary. - `orientation_or_normal`: Orientation index or normal vector indicating the boundary direction. - `direction`: Index indicating the spatial direction (1 for \( \theta \)-direction, otherwise \( s \)-direction). - `x`: Position vector at the boundary. - `t`: Time value. - `surface_flux_function`: Function to compute the surface flux. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Boundary flux as an `SVector`. """ function boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D) # Create the external boundary solution state with reflected normal velocity u_boundary = SVector(u_inner[1], -u_inner[2], u_inner[3], u_inner[4]) # Calculate the boundary flux based on direction if iseven(direction) flux1 = surface_flux_function[1](u_inner, u_boundary, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_boundary, orientation_or_normal, eq) else flux1 = surface_flux_function[1](u_boundary, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_boundary, u_inner, orientation_or_normal, eq) end return flux1,flux2 end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/2dmodel.jl
.jl
10,029
265
@doc raw""" BloodFlowEquations2D(;h,rho=1.0,xi=0.25) Defines the two-dimensional blood flow equations derived from the Navier-Stokes equations in curvilinear coordinates under the thin-artery assumption. This model describes the dynamics of blood flow along a compliant artery in two spatial dimensions (s, θ). ### Parameters - `h::T`: Wall thickness of the artery. - `rho::T`: Fluid density (default 1.0). - `xi::T`: Poisson's ratio (default 0.25). - `nu::T`: Viscosity coefficient. The governing equations account for conservation of mass and momentum, incorporating the effects of arterial compliance, curvature, and frictional losses. ```math \left\{\begin{aligned} \frac{\partial a}{\partial t} + \frac{\partial}{\partial \theta}\left( \frac{Q_{R\theta}}{A} \right) + \frac{\partial}{\partial s}(Q_s) &= 0 \\ \frac{\partial Q_{R\theta}}{\partial t} + \frac{\partial}{\partial \theta}\left(\frac{Q_{R\theta}^2}{2A^2} + A P(a)\right) + \frac{\partial}{\partial s}\left( \frac{Q_{R\theta}Q_s}{A} \right) &= P(a) \frac{\partial A}{\partial \theta} - 2 R k \frac{Q_{R\theta}}{A} + \frac{2R}{3} \mathcal{C}\sin \theta \frac{Q_s^2}{A} \\ \frac{\partial Q_{s}}{\partial t} + \frac{\partial}{\partial \theta}\left(\frac{Q_{R\theta} Q_s}{A^2} \right) + \frac{\partial}{\partial s}\left( \frac{Q_s^2}{A} - \frac{Q_{R\theta}^2}{2A^2} + A P(a) \right) &= P(a) \frac{\partial A}{\partial s} - R k \frac{Q_s}{A} - \frac{2R}{3} \mathcal{C}\sin \theta \frac{Q_s Q_{R\theta}}{A^2} \\ P(a) &= P_{ext} + \frac{Eh}{\sqrt{2}\left(1-\xi^2\right)}\frac{\sqrt{A} - \sqrt{A_0}}{A_0} \\ R &= \sqrt{2A} \end{aligned}\right. 
``` """ struct BloodFlowEquations2D{T<:Real} <: AbstractBloodFlowEquations{2,5} h ::T # Wall thickness rho::T # Fluid density xi::T # Poisson's ratio nu::T # Viscosity coefficient end function BloodFlowEquations2D(;h,rho=1.0,xi=0.25,nu=0.04) return BloodFlowEquations2D(h,rho,xi,nu) end Trixi.have_nonconservative_terms(::BloodFlowEquations2D) = Trixi.True() @doc raw""" Trixi.flux(u, orientation::Integer, eq::BloodFlowEquations2D) Computes the flux vector for the conservation laws of the 2D blood flow model in either the \( \theta \)-direction or the \( s \)-direction, depending on the specified orientation. ### Parameters - `u`: State vector. - `orientation::Integer`: Direction of the flux computation (1 for \( \theta \)-direction, otherwise \( s \)-direction). - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Flux vector as an `SVector`. """ function Trixi.flux(u, orientation::Integer, eq::BloodFlowEquations2D) P = pressure(u, eq) # Compute pressure from state vector a, QRθ, Qs, E, A0 = u QRθ = 0.0 A = a + A0 # Total cross-sectional area if orientation == 1 # Flux in θ-direction f1 = QRθ / A f2 = QRθ^2 / (2 * A^2) + A * P f3 = QRθ * Qs / A^2 return SVector(f1, f2, f3, 0, 0) else # Flux in s-direction f1 = Qs f2 = QRθ * Qs / A f3 = Qs^2 / A - QRθ^2/(2*A^2)+ A * P return SVector(f1, f2, f3, 0, 0) end end @doc raw""" Trixi.flux(u, normal, eq::BloodFlowEquations2D) Computes the flux vector for the conservation laws of the 2D blood flow model based on a normal vector. ### Parameters - `u`: State vector. - `normal`: Normal vector indicating the direction of the flux. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Flux vector as an `SVector`. 
""" function Trixi.flux(u, normal, eq::BloodFlowEquations2D) P = pressure(u, eq) # Compute pressure from state vector a, QRθ, Qs, E, A0 = u A = a + A0 # Total cross-sectional area # if normal == 1 # Flux in θ-direction f1 = QRθ / A f2 = QRθ^2 / (2 * A^2) + A * P f3 = QRθ * Qs / A^2 fl1 = SVector(f1, f2, f3, 0, 0) # else # Flux in s-direction f1 = Qs f2 = QRθ * Qs / A f3 = Qs^2 / A - QRθ^2/(2*A^2)+ A * P fl2 = SVector(f1, f2, f3, 0, 0) return fl1 .* normal[1] .+ fl2 .* normal[2] end @doc raw""" flux_nonconservative(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations2D) Computes the non-conservative flux for the 2D blood flow model based on the orientation. ### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `orientation::Integer`: Direction index for the flux (1 for \( \theta \)-direction, otherwise \( s \)-direction). - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Non-conservative flux vector. """ function flux_nonconservative(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations2D) T = eltype(u_ll) p_ll = pressure(u_ll, eq) p_rr = pressure(u_rr, eq) pmean = (p_ll + p_rr) / 2 # Compute average pressure a_ll, _, _, _, A0_ll = u_ll a_rr, _, _, _, A0_rr = u_rr A_ll = a_ll + A0_ll A_rr = a_rr + A0_rr Ajump = A_rr - A_ll # Compute jump in area if orientation == 1 return SVector(zero(T), -pmean * Ajump, 0, 0, 0) else return SVector(zero(T), 0, -pmean * Ajump, 0, 0) end end @doc raw""" flux_nonconservative(u_ll, u_rr, normal, eq::BloodFlowEquations2D) Computes the non-conservative flux for the 2D blood flow model based on a normal vector. ### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `normal`: Normal vector indicating the direction of the flux. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Non-conservative flux vector. 
""" function flux_nonconservative(u_ll, u_rr, normal, eq::BloodFlowEquations2D) T = eltype(u_ll) p_ll = pressure(u_ll, eq) p_rr = pressure(u_rr, eq) pmean = (p_ll + p_rr) / 2 # Compute average pressure a_ll, _, _, _, A0_ll = u_ll a_rr, _, _, _, A0_rr = u_rr A_ll = a_ll + A0_ll A_rr = a_rr + A0_rr Ajump = A_rr - A_ll # Compute jump in area # if orientation == 1 fn1 = SVector(zero(T), -pmean * Ajump, 0, 0, 0) # else fn2 = SVector(zero(T), 0, -pmean * Ajump, 0, 0) # end return @. fn1*normal[1] + fn2*normal[2] end @doc raw""" Trixi.max_abs_speed_naive(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations2D) Computes the maximum absolute speed for wave propagation in the 2D blood flow model using a naive approach, based on the given orientation. ### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `orientation::Integer`: Direction index for the speed computation (1 for \( \theta \)-direction, otherwise \( s \)-direction). - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Maximum absolute speed. """ function Trixi.max_abs_speed_naive(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations2D) a_ll, QRθ_ll, Qs_ll, _, A0_ll = u_ll a_rr, QRθ_rr, Qs_rr, _, A0_rr = u_rr A_ll = a_ll + A0_ll A_rr = a_rr + A0_rr pp_ll = pressure_der(u_ll, eq) pp_rr = pressure_der(u_rr, eq) if orientation == 1 return max(max(abs(QRθ_ll)/A_ll^2, abs(QRθ_rr)/A_rr^2), max(sqrt(pp_ll), sqrt(pp_rr))) else ws_ll = Qs_ll / A_ll ws_rr = Qs_rr / A_rr return max(abs(ws_ll), abs(ws_rr)) + max(sqrt(A_ll * pp_ll), sqrt(A_rr * pp_rr)) end end @doc raw""" Trixi.max_abs_speed_naive(u_ll, u_rr, normal, eq::BloodFlowEquations2D) Computes the maximum absolute speed for wave propagation in the 2D blood flow model using a naive approach, based on a normal vector. ### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `normal`: Normal vector indicating the direction of wave propagation. 
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Maximum absolute speed. """ function Trixi.max_abs_speed_naive(u_ll, u_rr, normal, eq::BloodFlowEquations2D) a_ll, QRθ_ll, Qs_ll, _, A0_ll = u_ll a_rr, QRθ_rr, Qs_rr, _, A0_rr = u_rr A_ll = a_ll + A0_ll A_rr = a_rr + A0_rr pp_ll = pressure_der(u_ll, eq) pp_rr = pressure_der(u_rr, eq) ws_ll = Qs_ll / A_ll ws_rr = Qs_rr / A_rr return max( abs(ws_ll*normal[2] + sqrt(A_ll*pp_ll)*sqrt(normal[1]^2/A_ll + normal[2]^2)), abs(ws_rr*normal[2] + sqrt(A_rr*pp_rr)*sqrt(normal[1]^2/A_rr + normal[2]^2)), abs(ws_ll*normal[2] + QRθ_ll/A_ll^2*normal[1]), abs(ws_rr*normal[2] + QRθ_rr/A_rr^2*normal[1]) ) end @doc raw""" Trixi.max_abs_speeds(u, eq::BloodFlowEquations2D) Computes the maximum absolute speeds for wave propagation in the 2D blood flow model in both \( \theta \)- and \( s \)-directions. ### Parameters - `u`: State vector. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Tuple containing the maximum absolute speeds in the \( \theta \)- and \( s \)-directions. """ function Trixi.max_abs_speeds(u,eq::BloodFlowEquations2D) a,QRθ,Qs,E,A0 = u A = a+A0 pp= pressure_der(u,eq) return max(abs(QRθ/A^2),sqrt(pp)),abs(Qs/A) + sqrt(A*pp) end @doc raw""" (dissipation::Trixi.DissipationLocalLaxFriedrichs)(u_ll, u_rr, orientation_or_normal_direction, eq::BloodFlowEquations2D) Computes the dissipation term using the Local Lax-Friedrichs method for the 2D blood flow model. ### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `orientation_or_normal_direction`: Orientation index or normal vector indicating the direction of dissipation. - `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`. ### Returns Dissipation vector. 
""" function (dissipation::Trixi.DissipationLocalLaxFriedrichs)(u_ll, u_rr, orientation_or_normal_direction, eq::BloodFlowEquations2D) λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, eq) diss = -0.5 .* abs(λ) .* (u_rr .- u_ll) # Compute dissipation term return SVector(diss[1], diss[2], diss[3], 0, 0) end include("./variables.jl") include("./bc2d.jl") include("./Test_Cases/pressure_in.jl") include("./Test_Cases/convergence_test.jl") include("./viz.jl")
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/viz.jl
.jl
7,237
155
@doc raw"""
    get3DData(eq::BloodFlowEquations2D, curve::F1, er::F2, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString, F1<:Function, F2<:Function}

Generates 3D spatial data from a 2D blood flow model for visualization.

This function extracts node coordinates, computes relevant flow parameters,
and generates a 3D representation of the arterial domain using cylindrical
coordinates. Optionally, it can export the data in VTK format.

### Parameters
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D` representing the blood flow model.
- `curve::F1`: Function representing the curve of the vessel (s)->curve(s).
- `er::F2`: Function representing the radial vector (theta,s)->er(theta,s).
- `semi`: Semi-discretization structure containing mesh and numerical information.
- `sol`: Solution array containing the numerical state variables.
- `time_index::Int=1`: Time step index for extracting the solution (default: 1).
- `vtk::Bool=false`: Whether to export data to VTK format (default: `false`).
- `out::T="./datas"`: Output directory for VTK files (default: `"./datas"`).

### Returns
Named tuple containing:
- `x`: X-coordinates of the generated 3D points.
- `y`: Y-coordinates of the generated 3D points.
- `z`: Z-coordinates of the generated 3D points.
- `A`: Cross-sectional areas at each point.
- `wtheta`: Flow angular velocity at each point.
- `ws`: Flow axial velocity at each point.
- `P`: Pressure at each point.
"""
function get3DData(eq::BloodFlowEquations2D, curve::F1, er::F2, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString, F1<:Function, F2<:Function}
    # Node coordinates are stored as (θ, s) pairs.
    thetaval = semi.cache.elements.node_coordinates[1, :, :, :]
    sval = semi.cache.elements.node_coordinates[2, :, :, :]
    soltime = sol[time_index]
    # The state vector is interleaved as (a, QRθ, Qs, E, A0) blocks of 5.
    aval = @view(soltime[1:5:end])
    Qthval = @view(soltime[2:5:end])
    Qsval = @view(soltime[3:5:end])
    Eval = @view(soltime[4:5:end])
    A0val = @view(soltime[5:5:end])
    Pval = map((a, Qth, Qs, E, A0) -> BloodFlowTrixi.pressure(SA[a, Qth, Qs, E, A0], eq),
               aval, Qthval, Qsval, Eval, A0val)
    # Map (θ, s, R) to a 3D point on the vessel surface.
    M(theta, s, R) = curve(s) .+ R .* er(theta, s)
    # FIX: `er` takes (theta, s); the original probed it as er(s, theta).
    Typ = eltype(er(thetaval[1], sval[1]))
    npts = length(thetaval)
    x = zeros(Typ, npts)
    y = zeros(Typ, npts)
    z = zeros(Typ, npts)
    A = zeros(Typ, npts)
    wtheta = zeros(Typ, npts)
    ws = zeros(Typ, npts)
    P = zeros(Typ, npts)
    c = 1
    for i in eachindex(thetaval, sval, aval, Qthval, Qsval, A0val, Pval)
        thvali = thetaval[i]
        svali = sval[i]
        Avali = aval[i] + A0val[i]
        Rvali = sqrt(2 * Avali)  # R = sqrt(2A), cf. `radius` for the 2D model
        # Angular velocity consistent with cons2prim: wθ = (4/3 · QRθ/R)/A.
        # The original computed this and then immediately overwrote it with
        # QRθ/A, which disagrees with the primitive-variable conversion.
        wthetavali = Typ(4 / 3 * (Qthval[i] / Rvali) / Avali)
        wsvali = Qsval[i] / Avali
        Pvali = Pval[i]
        xi, yi, zi = M(thvali, svali, Rvali)
        x[c] = xi
        y[c] = yi
        z[c] = zi
        A[c] = Avali
        wtheta[c] = wthetavali
        ws[c] = wsvali
        P[c] = Pvali
        c += 1
    end
    if vtk
        npoints = length(x)
        cells = [MeshCell(VTKCellTypes.VTK_VERTEX, (i,)) for i = 1:npoints]
        vtk_grid(joinpath(out, "./points$time_index"), x, y, z, cells) do vtk
            vtk["Area", VTKPointData()] = A
            vtk["Angular_Speed", VTKPointData()] = wtheta
            vtk["Axial", VTKPointData()] = ws
            vtk["Pressure", VTKPointData()] = P
        end
    end
    # `P` was previously computed but not returned; adding it is
    # backward-compatible for named-tuple consumers.
    return (; x = x, y = y, z = z, A = A, wtheta = wtheta, ws = ws, P = P)
end

@doc raw"""
    get3DData(eq::BloodFlowEquations2D, curve::F1, tanj::F2, nor::F3, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString, F1<:Function, F2<:Function, F3<:Function}

Generates 3D spatial data from a 2D blood flow model for visualization.

This function builds the radial director from the tangent and normal frame of
the vessel curve and delegates to the `(curve, er)` method. Optionally, it can
export the data in VTK format.

### Parameters
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D` representing the blood flow model.
- `curve::F1`: Function representing the curve of the vessel (s)->curve(s).
- `tanj::F2`: Function representing the tangent vector (s)->tanj(s).
- `nor::F3`: Function representing the normal vector (s)->nor(s).
- `semi`: Semi-discretization structure containing mesh and numerical information.
- `sol`: Solution array containing the numerical state variables.
- `time_index::Int=1`: Time step index for extracting the solution (default: 1).
- `vtk::Bool=false`: Whether to export data to VTK format (default: `false`).
- `out::T="./datas"`: Output directory for VTK files (default: `"./datas"`).

### Returns
Same named tuple as the `(curve, er)` method.
"""
function get3DData(eq::BloodFlowEquations2D, curve::F1, tanj::F2, nor::F3, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString, F1<:Function, F2<:Function, F3<:Function}
    # Cross product, used to complete (tanj, nor) into an orthogonal frame.
    ∧(v, w) = SA[v[2]*w[3] - v[3]*w[2], v[3]*w[1] - v[1]*w[3], v[1]*w[2] - v[2]*w[1]]
    er(theta, s) = cos(theta) .* nor(s) .+ sin(theta) .* ∧(tanj(s), nor(s))
    return get3DData(eq, curve, er, semi, sol, time_index; vtk = vtk, out = out)
end

@doc raw"""
    get3DData(eq::BloodFlowEquations2D, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString}

Generates 3D spatial data from a 2D blood flow model for visualization.
This will use a straight vessel along the x-axis.

### Parameters
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D` representing the blood flow model.
- `semi`: Semi-discretization structure containing mesh and numerical information.
- `sol`: Solution array containing the numerical state variables.
- `time_index::Int=1`: Time step index for extracting the solution (default: 1).
- `vtk::Bool=false`: Whether to export data to VTK format (default: `false`).
- `out::T="./datas"`: Output directory for VTK files (default: `"./datas"`).

### Returns
Same named tuple as the `(curve, er)` method.
"""
function get3DData(eq::BloodFlowEquations2D, semi, sol, time_index::Int = 1; vtk::Bool = false, out::T = "./datas") where {T<:AbstractString}
    # Canonical straight-vessel frame: curve along x, normal along y.
    curve(s) = SA[s, 0.0, 0.0]
    tanj(s) = SA[1.0, 0.0, 0.0]
    nor(s) = SA[0.0, 1.0, 0.0]
    ∧(v, w) = SA[v[2]*w[3] - v[3]*w[2], v[3]*w[1] - v[1]*w[3], v[1]*w[2] - v[2]*w[1]]
    er(theta, s) = cos(theta) .* nor(s) .+ sin(theta) .* ∧(tanj(s), nor(s))
    return get3DData(eq, curve, er, semi, sol, time_index; vtk = vtk, out = out)
end

# Extension stub: the real implementation is provided by a package extension
# when DataInterpolations.jl is loaded.
function interpolate_curve(xyz_data)
    @error "add DataInterpolations.jl to use this function"
end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/variables.jl
.jl
6,345
250
@doc raw"""
    Trixi.varnames(::typeof(cons2cons), ::BloodFlowEquations2D)

Returns the variable names in conservative form for the 2D blood flow model.

### Parameters
- `::typeof(cons2cons)`: Type representing the conservative variables.
- `::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Tuple containing the names of the conservative variables.
"""
Trixi.varnames(::typeof(cons2cons), ::BloodFlowEquations2D) = ("a", "QRθ", "Qs", "E", "A0")

@doc raw"""
    Trixi.varnames(::typeof(cons2prim), ::BloodFlowEquations2D)

Returns the variable names in primitive form for the 2D blood flow model.

### Parameters
- `::typeof(cons2prim)`: Type representing the primitive variables.
- `::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Tuple containing the names of the primitive variables.
"""
Trixi.varnames(::typeof(cons2prim), ::BloodFlowEquations2D) = ("A", "wθ", "ws", "P", "A0")

@doc raw"""
    Trixi.varnames(::typeof(cons2entropy), ::BloodFlowEquations2D)

Returns the variable names in entropy form for the 2D blood flow model.

### Parameters
- `::typeof(cons2entropy)`: Type representing the entropy variables.
- `::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Tuple containing the names of the entropy variables.
"""
Trixi.varnames(::typeof(cons2entropy), ::BloodFlowEquations2D) = ("A", "wθ", "ws", "En", "A0")

@doc raw"""
    Trixi.prim2cons(u, eq::BloodFlowEquations2D)

Converts the primitive variables to conservative variables for the 2D blood flow model.

### Parameters
- `u`: State vector in primitive form.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
State vector in conservative form as an `SVector`.
"""
function Trixi.prim2cons(u, eq::BloodFlowEquations2D)
    A, wθ, ws, P, A0 = u
    a = A - A0                              # area perturbation
    # Invert wθ = (4/3 · QRθ/R)/A with R = sqrt(2A).
    QRθ = wθ * A * sqrt(2 * A) * 3 / 4
    Qs = ws * A
    # Invert the pressure law P = b·(R - R0)/R0² with b = E·h/(1 - ξ²)
    # to recover the elastic modulus E.
    E = P * sqrt(2) * A0 / (sqrt(A) - sqrt(A0)) * (1 - eq.xi^2) / eq.h
    return SVector(a, QRθ, Qs, E, A0)
end

@doc raw"""
    Trixi.cons2prim(u, eq::BloodFlowEquations2D)

Converts the conservative variables to primitive variables for the 2D blood flow model.

### Parameters
- `u`: State vector in conservative form.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
State vector in primitive form as an `SVector`.
"""
function Trixi.cons2prim(u, eq::BloodFlowEquations2D)
    a, QRθ, Qs, E, A0 = u
    A = a + A0                    # total cross-sectional area
    R = radius(u, eq)             # R = sqrt(2A)
    ws = Qs / A                   # axial velocity
    wθ = (4 / 3 * QRθ / R) / A    # angular velocity recovered from QRθ
    R0 = sqrt(2 * A0)
    η = R - R0                    # NOTE(review): unused — candidate for removal
    P = pressure(u, eq)
    return SVector(A, wθ, ws, P, A0)
end

@doc raw"""
    Trixi.cons2entropy(u, eq::BloodFlowEquations2D)

Converts the conservative variables to entropy variables for the 2D blood flow model.

### Parameters
- `u`: State vector in conservative form.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
State vector in entropy form as an `SVector`.
"""
function Trixi.cons2entropy(u, eq::BloodFlowEquations2D)
    a, QRθ, Qs, E, A0 = u
    A = a + A0
    R = radius(u, eq)
    ws = Qs / A
    wθ = (4 / 3 * QRθ / R) / A
    R0 = sqrt(2 * A0)
    η = R - R0                    # NOTE(review): η and P are unused here
    P = pressure(u, eq)
    En = entropy(u, eq)
    return SVector(A, wθ, ws, En, A0)
end

@doc raw"""
    friction(u, x, eq::BloodFlowEquations2D)

Computes the friction term for the 2D blood flow model.

### Parameters
- `u`: State vector.
- `x`: Position vector.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Friction term as a scalar (negative, i.e. a loss term).
"""
function friction(u, x, eq::BloodFlowEquations2D)
    R = radius(u, eq) # Compute the radius based on cross-sectional area
    return eltype(u)(-11 * eq.nu / R) # Return friction term based on viscosity and radius
end

@doc raw"""
    pressure(u, eq::BloodFlowEquations2D)

Computes the pressure for the 2D blood flow model.

### Parameters
- `u`: State vector.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Pressure as a scalar.
"""
function pressure(u, eq::BloodFlowEquations2D)
    T = eltype(u)
    A = u[1] + u[5]          # total area (a + A0)
    E = u[4]                 # elastic modulus, carried in the state
    A0 = u[5]
    R = radius(u, eq)        # R = sqrt(2A)
    R0 = sqrt(2 * A0)        # reference radius
    xi = eq.xi
    h = eq.h
    b = E * h / (1 - xi^2) # Precompute constant b
    # Linear-elastic tube law: P = b·(R - R0)/R0².
    return T(b * (R - R0) / R0^2)
end

@doc raw"""
    radius(u, eq::BloodFlowEquations2D)

Computes the radius based on the cross-sectional area for the 2D blood flow model.

### Parameters
- `u`: State vector.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Radius as a scalar.
"""
function radius(u, eq::BloodFlowEquations2D)
    # In the 2D model A = R²/2, hence R = sqrt(2A).
    return sqrt((u[1] + u[5]) * 2) # Compute radius from cross-sectional area
end

@doc raw"""
    inv_pressure(p, u, eq::BloodFlowEquations2D)

Computes the inverse of the pressure function for the 2D blood flow model,
i.e. the cross-sectional area corresponding to a given pressure.

### Parameters
- `p`: Pressure value.
- `u`: State vector (supplies E and A0).
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Inverse pressure (cross-sectional area) as a scalar.
"""
function inv_pressure(p, u, eq::BloodFlowEquations2D)
    T = eltype(u)
    E = u[4]
    A0 = u[5]
    R0 = sqrt(2 * A0)
    xi = eq.xi
    h = eq.h
    b = E * h / (1 - xi^2) # Precompute constant b
    # Invert P = b·(R - R0)/R0²: R = R0²·P/b + R0, then A = R²/2.
    return T((R0^2 * p / b + R0)^2 / 2)
end

@doc raw"""
    pressure_der(u, eq::BloodFlowEquations2D)

Computes the derivative of the pressure with respect to the cross-sectional
area for the 2D blood flow model.

### Parameters
- `u`: State vector.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Derivative of pressure as a scalar.
"""
function pressure_der(u, eq::BloodFlowEquations2D)
    T = eltype(u)
    A = u[1] + u[5]
    E = u[4]
    A0 = u[5]
    xi = eq.xi
    h = eq.h
    b = E * h / (1 - xi^2)
    # dP/dA = (b/R0²)·dR/dA with R = sqrt(2A) and R0² = 2·A0.
    return T((b / sqrt(2)) * 0.5 / (sqrt(A) * A0))
end

@doc raw"""
    Trixi.entropy(u, eq::BloodFlowEquations2D)

Computes the entropy for the 2D blood flow model.

### Parameters
- `u`: State vector.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Entropy as a scalar.
"""
function Trixi.entropy(u, eq::BloodFlowEquations2D)
    up = cons2prim(u, eq)
    _, _, _, E, _ = u
    A, wt, ws, P, A0 = up
    # Kinetic + pressure potential per unit area; the 9/8 factor weights the
    # angular velocity contribution.
    psi = (ws^2 + wt^2 * 9 / 8) / 2 + P
    b = E * eq.h / (1 - eq.xi^2)
    # Pressure potential term (antiderivative of the tube law in A).
    pt = b / sqrt(2) / (3 * A0) * (A^(3 / 2) - A0^(3 / 2))
    return A * psi - pt
end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/Test_Cases/pressure_in.jl
.jl
4,506
143
@doc raw"""
    initial_condition_simple(x, t, eq::BloodFlowEquations2D; R0=2.0)

Defines a simple initial condition for the 2D blood flow model.

### Parameters
- `x`: Position vector.
- `t`: Initial time.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.
- `R0`: Initial radius (default is 2.0).

### Returns
State vector as an `SVector`.
"""
function initial_condition_simple(x, t, eq::BloodFlowEquations2D; R0=2.0)
    T = eltype(x)
    # Rest state: zero area perturbation and zero momenta, with the reference
    # area A0 = R0²/2 (2D model convention A = R²/2).
    A0 = T(R0^2 / 2)
    QRθ = T(0.0)
    Qs = T(0.0)
    E = T(1e7)   # elastic modulus carried in the state vector
    return SVector(zero(T), QRθ, Qs, E, A0)
end

@doc raw"""
    curvature(x)

Returns a constant curvature for the 2D blood flow model.

### Parameters
- `x`: Position (scalar curvilinear coordinate).

### Returns
Curvature as a scalar (constant 1 in the input's type).
"""
curvature(x) = typeof(x)(1.0)

@doc raw"""
    source_term_simple(u, x, t, eq::BloodFlowEquations2D)

Computes a simple source term for the 2D blood flow model, including friction
and curvature effects.

### Parameters
- `u`: State vector.
- `x`: Position vector (x[1] = θ, x[2] = s).
- `t`: Time value.
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Source term as an `SVector` (zero for the a, E and A0 components).
"""
function source_term_simple(u, x, t, eq::BloodFlowEquations2D)
    T = eltype(u)
    a, QRθ, Qs, _, A0 = u
    A = a + A0
    s1 = zero(T)                 # no mass source
    k = friction(u, x, eq)       # friction coefficient (negative)
    R = radius(u, eq)
    # θ-momentum source: curvature coupling with the axial flow plus friction.
    s2 = T(
        2 * R / 3 * curvature(x[2]) * sin(x[1]) * Qs^2 / A
        +
        3 * R * k * QRθ / A
    )
    # s-momentum source: curvature coupling between the two momenta plus friction.
    s3 = T(
        -2 * R / 3 * curvature(x[2]) * sin(x[1]) * Qs * QRθ / A
        +
        R * k * Qs / A
    )
    return SVector(s1, s2, s3, 0, 0)
end

@doc raw"""
    boundary_condition_pressure_in(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D)

Applies an inflow boundary condition with a prescribed pressure for the 2D
blood flow model.

### Parameters
- `u_inner`: Inner state vector at the boundary.
- `orientation_or_normal`: Orientation index or normal vector indicating the boundary direction.
- `direction`: Index indicating the spatial direction (1 for ``\theta``-direction, otherwise ``s``-direction).
- `x`: Position vector at the boundary.
- `t`: Time value.
- `surface_flux_function`: Tuple of flux functions (conservative, nonconservative).
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Boundary fluxes as a tuple (conservative, nonconservative).
"""
function boundary_condition_pressure_in(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations2D)
    # Half-period sine-squared pressure pulse for t < 0.125, zero afterwards.
    Pin = ifelse(t < 0.125, 2e4 * sinpi(t / 0.125)^2, 0.0)
    # Convert the prescribed pressure into a boundary area perturbation.
    Ain = inv_pressure(Pin, u_inner, eq)
    A0in = u_inner[5]
    ain = Ain - A0in
    u_boundary = SVector(
        ain,
        u_inner[2],
        u_inner[3],
        u_inner[4],
        u_inner[5]
    )
    # Calculate the boundary flux
    if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary
        flux1 = surface_flux_function[1](u_inner, u_boundary, orientation_or_normal, eq)
        flux2 = surface_flux_function[2](u_inner, u_boundary, orientation_or_normal, eq)
    else # u_boundary is "left" of boundary, u_inner is "right" of boundary
        flux1 = surface_flux_function[1](u_boundary, u_inner, orientation_or_normal, eq)
        flux2 = surface_flux_function[2](u_boundary, u_inner, orientation_or_normal, eq)
    end
    return flux1, flux2
end

@doc raw"""
    boundary_condition_pressure_in(u_inner, normal, x, t, surface_flux_function, eq::BloodFlowEquations2D)

Applies an inflow boundary condition with a prescribed pressure for the 2D
blood flow model. This version does not use a specific direction parameter.

### Parameters
- `u_inner`: Inner state vector at the boundary.
- `normal`: Normal vector indicating the boundary direction.
- `x`: Position vector at the boundary.
- `t`: Time value.
- `surface_flux_function`: Tuple of flux functions (conservative, nonconservative).
- `eq::BloodFlowEquations2D`: Instance of `BloodFlowEquations2D`.

### Returns
Boundary fluxes as a tuple (conservative, nonconservative).
"""
function boundary_condition_pressure_in(u_inner, normal, x, t, surface_flux_function, eq::BloodFlowEquations2D)
    # Same pressure pulse as the direction-based variant above.
    Pin = ifelse(t < 0.125, 2e4 * sinpi(t / 0.125)^2, 0.0)
    A0in = u_inner[5]
    Ain = inv_pressure(Pin, u_inner, eq)
    ain = Ain - A0in
    u_boundary = SVector(
        ain,
        u_inner[2],
        u_inner[3],
        u_inner[4],
        u_inner[5]
    )
    flux1 = surface_flux_function[1](u_inner, u_boundary, normal, eq)
    flux2 = surface_flux_function[2](u_inner, u_boundary, normal, eq)
    return flux1, flux2
end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/2DModel/Test_Cases/convergence_test.jl
.jl
571
24
function Trixi.initial_condition_convergence_test(x, t, eq::BloodFlowEquations2D) T = eltype(x) R0 = T(1.0) A0 = T(R0^2/2) E = T(1e7) QRθ = Qs = T(sinpi(x[1] * t)) return SVector(zero(T), QRθ,Qs, E, A0) end function Trixi.source_terms_convergence_test(u, x, t, eq::BloodFlowEquations2D) T = eltype(u) A0 = u[4] s1 = pi * t * cospi(x[1] * t) |> T # k = friction(u, x, eq) # R = radius(u, eq) s2 = pi * x[1] * cospi(x[1] * t) + pi * t * cospi(x[1] * t) * sinpi(x[1] * t) / A0 return SVector(s1, s2, 0, 0) end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/1dmodel.jl
.jl
4,741
153
@doc raw"""
    BloodFlowEquations1D(;h,rho=1.0,xi=0.25,nu=0.04)

Blood Flow equations in one space dimension. This model describes the dynamics
of blood flow along a compliant artery using one-dimensional equations derived
from the Navier-Stokes equations. The equations account for conservation of
mass and momentum, incorporating the effect of arterial compliance and
frictional losses.

The governing equations are given by
```math
\left\{\begin{aligned}
  \frac{\partial a}{\partial t} + \frac{\partial}{\partial x}(Q) &= 0 \\
  \frac{\partial Q}{\partial t} + \frac{\partial}{\partial x}\left(\frac{Q^2}{A} + A P(a)\right) &= P(a) \frac{\partial A}{\partial x} - 2 \pi R k \frac Q {A}\\
  P(a) &= P_{ext} + \frac{Eh\sqrt{\pi}}{1-\xi^2}\frac{\sqrt{A} - \sqrt{A_0}}{A_0} \\
  R &= \sqrt{\frac{A}{\pi}}
\end{aligned}\right.
```
"""
struct BloodFlowEquations1D{T<:Real} <: AbstractBloodFlowEquations{1,4}
    # constant coefficients
    h  ::T  # Wall thickness
    rho::T  # Fluid density
    xi ::T  # Poisson's ratio
    nu ::T  # Viscosity coefficient
end

# Keyword constructor with common default fluid parameters.
function BloodFlowEquations1D(;h, rho=1.0, xi=0.25, nu=0.04)
    return BloodFlowEquations1D(h, rho, xi, nu)
end

# The pressure term P·∂A/∂x is handled as a nonconservative product.
Trixi.have_nonconservative_terms(::BloodFlowEquations1D) = Trixi.True()

@doc raw"""
    Trixi.flux(u, orientation::Integer, eq::BloodFlowEquations1D)

Computes the flux vector for the conservation laws of the blood flow model.

### Parameters
- `u`: State vector.
- `orientation::Integer`: Orientation index for flux computation.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Flux vector as an `SVector` (zero for the non-evolving E and A0 components).
"""
function Trixi.flux(u, orientation::Integer, eq::BloodFlowEquations1D)
    P = pressure(u, eq)
    # State layout is (a, Q, E, A0) with a = A - A0.
    a, Q, E, A0 = u
    A = a + A0
    f1 = Q                  # mass flux
    f2 = Q^2/A + A*P        # momentum flux (advective + pressure)
    return SVector(f1, f2, 0, 0)
end

@doc raw"""
    flux_nonconservative(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations1D)

Computes the non-conservative flux for the model, used for handling
discontinuities in pressure.

### Parameters
- `u_ll`: Left state vector.
- `u_rr`: Right state vector.
- `orientation::Integer`: Orientation index.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Non-conservative flux vector.
"""
function flux_nonconservative(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations1D)
    T = eltype(u_ll)
    p_ll = pressure(u_ll, eq)
    p_rr = pressure(u_rr, eq)
    pmean = (p_ll + p_rr)/2       # interface pressure (arithmetic mean)
    a_ll, _, _, A0_ll = u_ll
    a_rr, _, _, A0_rr = u_rr
    A_ll = a_ll + A0_ll
    A_rr = a_rr + A0_rr
    Ajump = A_rr - A_ll
    # Discretization of the nonconservative product -P·∂A/∂x.
    return SVector(zero(T), -pmean*Ajump, 0, 0)
end

@doc raw"""
    Trixi.max_abs_speed_naive(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations1D)

Calculates the maximum absolute speed for wave propagation in the blood flow
model using a naive approach.

### Parameters
- `u_ll`: Left state vector.
- `u_rr`: Right state vector.
- `orientation::Integer`: Orientation index.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Maximum absolute speed.
"""
function Trixi.max_abs_speed_naive(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations1D)
    a_ll, Q_ll, E_ll, A0_ll = u_ll
    a_rr, Q_rr, E_rr, A0_rr = u_rr
    A_ll = a_ll + A0_ll
    A_rr = a_rr + A0_rr
    # Pressure derivatives dP/dA give the elastic wave speed sqrt(A·P'(A)).
    pp_ll = pressure_der(u_ll, eq)
    pp_rr = pressure_der(u_rr, eq)
    # Transport velocities w = Q/A.
    w_ll = Q_ll/A_ll
    w_rr = Q_rr/A_rr
    return max(abs(w_ll), abs(w_rr)) + max(sqrt(A_ll*pp_ll), sqrt(A_rr*pp_rr))
end

@doc raw"""
    Trixi.max_abs_speeds(u, eq::BloodFlowEquations1D)

Computes the maximum absolute speed for wave propagation in the model.

### Parameters
- `u`: State vector.
- `eq::BloodFlowEquations1D`: Instance of `BloodFlowEquations1D`.

### Returns
Maximum absolute speed as a scalar value.
"""
function Trixi.max_abs_speeds(u, eq::BloodFlowEquations1D)
    a, Q, E, A0 = u
    A = a + A0
    pp = pressure_der(u, eq)
    # Transport speed plus elastic wave speed.
    return abs(Q/A) + sqrt(A*pp)
end

@doc raw"""
    (dissipation::Trixi.DissipationLocalLaxFriedrichs)(u_ll, u_rr, orientation_or_normal_direction, eq::BloodFlowEquations1D)

Calculates the dissipation term using the Local Lax-Friedrichs method.

### Parameters
- `u_ll`: Left state vector.
- `u_rr`: Right state vector.
- `orientation_or_normal_direction`: Orientation or normal direction.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Dissipation vector.
"""
function (dissipation::Trixi.DissipationLocalLaxFriedrichs)(u_ll, u_rr, orientation_or_normal_direction, eq::BloodFlowEquations1D)
    λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, eq)
    diss = -0.5f0 .* abs(λ) .* (u_rr .- u_ll)
    # No dissipation on the last two components (E, A0): they carry
    # material/geometry data rather than evolved conservation variables.
    return SVector(diss[1], diss[2], 0, 0)
end

include("./variables.jl")
include("./bc1d.jl")
include("./Test_Cases/pressure_in.jl")
include("./Test_Cases/convergence_test.jl")
include("./Ord2/1dmodelord2.jl")
include("./viz.jl")
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/viz.jl
.jl
3,590
84
@doc raw"""
    get3DData(eq::BloodFlowEquations1D, semi, sol, time_index::Int = 1; theta_disc::Int = 32, vtk::Bool = false, out::T = "./datas") where T<:AbstractString

Generates 3D spatial data from a 1D blood flow model for visualization.

This function extracts unique node coordinates, computes relevant flow
parameters, and generates a 3D representation of the arterial domain using
cylindrical coordinates. Optionally, it can export the data in VTK format.

### Parameters
- `eq::BloodFlowEquations1D`: Instance of `BloodFlowEquations1D` representing the blood flow model.
- `semi`: Semi-discretization structure containing mesh and numerical information.
- `sol`: Solution array containing the numerical state variables.
- `time_index::Int=1`: Time step index for extracting the solution (default: 1).
- `theta_disc::Int=32`: Number of angular discretization points for the cylindrical representation (default: 32).
- `vtk::Bool=false`: Whether to export data to VTK format (default: `false`).
- `out::T="./datas"`: Output directory for VTK files (default: `"./datas"`).

### Returns
Named tuple containing:
- `x`: X-coordinates of the generated 3D points.
- `y`: Y-coordinates of the generated 3D points.
- `z`: Z-coordinates of the generated 3D points.
- `A`: Cross-sectional areas at each point.
- `w`: Flow velocities at each point.
- `P`: Pressure values at each point.

### Notes
- The function first extracts unique spatial positions from the mesh.
- The blood flow variables (`A`, `Q`, `E`, `A0`) are obtained from the solution array.
- Pressure is computed using the `pressure` function.
- A cylindrical coordinate transformation is applied to represent the vessel cross-section.
- If `vtk` is `true`, the function writes the data to VTK format using `vtk_grid`.
"""
function get3DData(eq::BloodFlowEquations1D, semi, sol, time_index::Int = 1; theta_disc::Int = 32, vtk::Bool = false, out::T = "./datas") where T<:AbstractString
    xval_not = semi.cache.elements.node_coordinates[:]
    # Indices of the first occurrence of each distinct coordinate, in first
    # appearance order. (O(n) unique-by-key; the original ran `findfirst`
    # per unique value, which is O(n²).)
    indices = unique(i -> xval_not[i], eachindex(xval_not))
    xval = xval_not[indices]
    soltime = sol[time_index]
    # The state is interleaved as (a, Q, E, A0) blocks of 4.
    aval = @view(@view(soltime[1:4:end])[indices])
    Qval = @view(@view(soltime[2:4:end])[indices])
    Eval = @view(@view(soltime[3:4:end])[indices])
    A0val = @view(@view(soltime[4:4:end])[indices])
    Pval = map((a, Q, E, A0) -> pressure(SA[a, Q, E, A0], eq), aval, Qval, Eval, A0val)
    # Angular samples; drop the duplicated endpoint (2π ≡ 0).
    theta = range(0, 2π, theta_disc)[1:end-1]
    M(th, x, R) = (x, R * cos(th), R * sin(th))
    Typ = eltype(xval)
    npts = (theta_disc - 1) * length(xval)
    x = zeros(Typ, npts)
    y = zeros(Typ, npts)
    z = zeros(Typ, npts)
    A = zeros(Typ, npts)
    w = zeros(Typ, npts)
    P = zeros(Typ, npts)
    c = 1
    for i in eachindex(xval, aval, Qval, A0val, Pval)
        xvali = xval[i]
        Avali = aval[i] + A0val[i]
        wvali = Qval[i] / Avali
        Pvali = Pval[i]
        Rvali = sqrt(Avali / pi)   # A = πR² in the 1D model
        for thetaj in theta
            xi, yi, zi = M(thetaj, xvali, Rvali)
            x[c] = xi
            y[c] = yi
            z[c] = zi
            A[c] = Avali
            w[c] = wvali
            P[c] = Pvali
            c += 1
        end
    end
    if vtk
        npoints = length(x)
        cells = [MeshCell(VTKCellTypes.VTK_VERTEX, (i,)) for i = 1:npoints]
        vtk_grid(joinpath(out, "./points$time_index"), x, y, z, cells) do vtk
            vtk["Area", VTKPointData()] = A
            vtk["Speed", VTKPointData()] = w
            vtk["Pressure", VTKPointData()] = P
        end
    end
    # `P` is now returned as the docstring promises (the original omitted it).
    return (; x = x, y = y, z = z, A = A, w = w, P = P)
end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/variables.jl
.jl
5,489
224
@doc raw""" Trixi.varnames(::typeof(cons2cons), ::BloodFlowEquations1D) Returns the variable names corresponding to the conserved variables in the blood flow model. ### Parameters - `::typeof(cons2cons)`: Type indicating conserved to conserved variable conversion. - `::BloodFlowEquations1D`: Instance of `BloodFlowEquations1D`. ### Returns A tuple of variable names: `("a", "Q", "E", "A0")`. """ Trixi.varnames(::typeof(cons2cons),::BloodFlowEquations1D) = ("a","Q","E","A0") @doc raw""" Trixi.varnames(::typeof(cons2prim), ::BloodFlowEquations1D) Returns the variable names corresponding to the primitive variables in the blood flow model. ### Parameters - `::typeof(cons2prim)`: Type indicating conserved to primitive variable conversion. - `::BloodFlowEquations1D`: Instance of `BloodFlowEquations1D`. ### Returns A tuple of variable names: `("A", "w", "P", "A0", "P")`. """ Trixi.varnames(::typeof(cons2prim),::BloodFlowEquations1D) = ("A","w","P","A0","P") @doc raw""" Trixi.varnames(::typeof(cons2entropy), ::BloodFlowEquations1D) Returns the variable names corresponding to the entropy variables in the blood flow model. ### Parameters - `::typeof(cons2entropy)`: Type indicating conserved to entropy variable conversion. - `::BloodFlowEquations1D`: Instance of `BloodFlowEquations1D`. ### Returns A tuple of variable names: `("A", "w", "En", "A0", "P")`. """ Trixi.varnames(::typeof(cons2entropy),::BloodFlowEquations1D) = ("A","w","En","A0","P") @doc raw""" Trixi.cons2prim(u, eq::BloodFlowEquations1D) Converts the conserved variables to primitive variables. ### Parameters - `u`: State vector. - `eq`: Instance of `BloodFlowEquations1D`. ### Returns Primitive variable vector. """ function Trixi.cons2prim(u,eq::BloodFlowEquations1D) a,Q,E,A0 = u P = pressure(u,eq) A = a+A0 w = Q/A return SVector(A,w,P,A0) end @doc raw""" Trixi.cons2entropy(u, eq::BloodFlowEquations1D) Converts the conserved variables to entropy variables. ### Parameters - `u`: State vector. 
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Entropy variable vector.
"""
function Trixi.cons2entropy(u, eq::BloodFlowEquations1D)
    a, Q, E, A0 = u
    P = pressure(u, eq)
    A = a + A0
    w = Q / A
    En = entropy(u, eq)
    return SVector(A, w, En, A0)
end

@doc raw"""
    Trixi.prim2cons(u, eq::BloodFlowEquations1D)

Converts the primitive variables to conserved variables.

### Parameters
- `u`: Primitive variable vector.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Conserved variable vector.
"""
function Trixi.prim2cons(u, eq::BloodFlowEquations1D)
    A, w, P, A0 = u
    a = A - A0      # area perturbation
    Q = w * A       # momentum
    # Invert P = (E·h·sqrt(π)/(1-ξ²))·(√A - √A0)/A0 for the elastic modulus E.
    E = P / sqrt(pi) * A0 / (sqrt(A) - sqrt(A0)) * (1 - eq.xi^2) / eq.h
    return SVector(a, Q, E, A0)
end

@doc raw"""
    friction(u, x, eq::BloodFlowEquations1D)

Calculates the friction term for the blood flow equations, which represents
viscous resistance to flow along the artery wall.

### Parameters
- `u`: State vector containing cross-sectional area and flow rate.
- `x`: Position along the artery.
- `eq::BloodFlowEquations1D`: Instance of the blood flow model.

### Returns
Friction coefficient as a scalar (negative, i.e. a loss term).
"""
function friction(u, x, eq::BloodFlowEquations1D)
    R = radius(u, eq)
    return eltype(u)(-11 * eq.nu / R)
end

@doc raw"""
    pressure(u, eq::BloodFlowEquations1D)

Computes the pressure given the state vector based on the compliance of the
artery.

### Parameters
- `u`: State vector.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Pressure as a scalar.
"""
function pressure(u, eq::BloodFlowEquations1D)
    T = eltype(u)
    A = u[1] + u[4]   # total area (a + A0)
    E = u[3]          # elastic modulus, carried in the state
    A0 = u[4]
    xi = eq.xi
    h = eq.h
    b = E * h * sqrt(pi) / (1 - xi^2)   # tube-law stiffness constant
    # Tube law: P = b·(√A - √A0)/A0.
    return T(b * (sqrt(A) - sqrt(A0)) / A0)
end

@doc raw"""
    radius(u, eq::BloodFlowEquations1D)

Computes the radius of the artery based on the cross-sectional area.

### Parameters
- `u`: State vector.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Radius as a scalar.
"""
function radius(u, eq::BloodFlowEquations1D)
    # In the 1D model A = πR², hence R = sqrt(A/π).
    return sqrt((u[1] + u[4]) / pi)
end

@doc raw"""
    inv_pressure(p, u, eq::BloodFlowEquations1D)

Computes the inverse relation of pressure to cross-sectional area.

### Parameters
- `p`: Pressure.
- `u`: State vector (supplies E and A0).
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Cross-sectional area corresponding to the given pressure.
"""
function inv_pressure(p, u, eq::BloodFlowEquations1D)
    T = eltype(u)
    E = u[3]
    A0 = u[4]
    xi = eq.xi
    h = eq.h
    # Invert P = b·(√A - √A0)/A0: √A = A0·p/b + √A0.
    b = E * h * sqrt(pi) / (1 - xi^2)
    return T((A0 * p / b + sqrt(A0))^2)
end

@doc raw"""
    pressure_der(u, eq::BloodFlowEquations1D)

Computes the derivative of pressure with respect to cross-sectional area.

### Parameters
- `u`: State vector.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Derivative of pressure.
"""
function pressure_der(u, eq::BloodFlowEquations1D)
    T = eltype(u)
    A = u[1] + u[4]
    E = u[3]
    A0 = u[4]
    xi = eq.xi
    h = eq.h
    # dP/dA = b · (1/(2√A)) / A0 with b = E·h·√π/(1-ξ²).
    return T(E * h * sqrt(pi) / (1 - xi^2) * 0.5 / (sqrt(A) * A0))
end

@doc raw"""
    Trixi.entropy(u, eq::BloodFlowEquations1D)

Computes the entropy of the system for the given state vector.

### Parameters
- `u`: State vector.
- `eq`: Instance of `BloodFlowEquations1D`.

### Returns
Entropy as a scalar value.
"""
function Trixi.entropy(u, eq::BloodFlowEquations1D)
    up = cons2prim(u, eq)
    _, _, E, _ = u
    A, w, P, A0 = up
    # Kinetic + pressure potential per unit area.
    psi = w^2 / 2 + P
    b = E * eq.h / (1 - eq.xi^2)
    # Pressure potential (antiderivative of the tube law in A).
    pt = b * sqrt(pi) / (3 * A0) * (A^(3 / 2) - A0^(3 / 2))
    return A * psi - pt
end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/bc1d.jl
.jl
3,006
80
@doc raw""" boundary_condition_outflow(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) Implements the outflow boundary condition, assuming that there is no reflection at the boundary. ### Parameters - `u_inner`: State vector inside the domain near the boundary. - `orientation_or_normal`: Normal orientation of the boundary. - `direction`: Integer indicating the direction of the boundary. - `x`: Position vector. - `t`: Time. - `surface_flux_function`: Function to compute flux at the boundary. - `eq`: Instance of `BloodFlowEquations1D`. ### Returns Computed boundary flux. """ function boundary_condition_outflow(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) # calculate the boundary flux if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary flux1 = surface_flux_function[1](u_inner, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_inner, orientation_or_normal, eq) else # u_inner is "left" of boundary, u_inner is "right" of boundary flux1 = surface_flux_function[1](u_inner, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_inner, orientation_or_normal, eq) end return flux1,flux2 end @doc raw""" boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) Implements a slip wall boundary condition where the normal component of velocity is reflected. ### Parameters - `u_inner`: State vector inside the domain near the boundary. - `orientation_or_normal`: Normal orientation of the boundary. - `direction`: Integer indicating the direction of the boundary. - `x`: Position vector. - `t`: Time. - `surface_flux_function`: Function to compute flux at the boundary. - `eq`: Instance of `BloodFlowEquations1D`. ### Returns Computed boundary flux at the slip wall. 
""" function boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) # create the "external" boundary solution state u_boundary = SVector(u_inner[1], -u_inner[2], u_inner[3], u_inner[4]) # calculate the boundary flux if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary flux1 = surface_flux_function[1](u_inner, u_boundary, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_boundary, orientation_or_normal, eq) else # u_boundary is "left" of boundary, u_inner is "right" of boundary flux1 = surface_flux_function[1](u_boundary, u_inner, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_boundary, u_inner, orientation_or_normal, eq) end return flux1,flux2 end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/Ord2/1dmodelord2.jl
.jl
2,548
67
struct BloodFlowEquations1DOrd2{E} <: Trixi.AbstractEquationsParabolic{1, 4, GradientVariablesConservative} model1d ::E end Trixi.varnames(mapin,eq::BloodFlowTrixi.BloodFlowEquations1DOrd2) = Trixi.varnames(mapin,eq.model1d) function Trixi.flux(u,gradients,orientation::Int,eq_parab ::BloodFlowEquations1DOrd2) dudx = gradients a,Q,_,A0 = u A = a+A0 val = -3*eq_parab.model1d.nu * (-(dudx[1] + dudx[4])*Q/A + dudx[2]) return SVector(0.0,val,0,0) end function source_term_simple_ord2(u, x, t, eq::BloodFlowEquations1D) res = source_term_simple(u, x, t, eq) k = friction(u,x,eq) R = radius(u,eq) return SVector(res[1],res[2]/(1-R*k/(4*eq.nu)),res[3],res[4]) end @inline function boundary_condition_pressure_in(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Gradient, equations_parabolic::BloodFlowEquations1DOrd2) return boundary_condition_pressure_in(u_inner,orientation_or_normal,direction,x,t,flux_lax_friedrichs,equations_parabolic.model1d) end @inline function boundary_condition_pressure_in(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Divergence, equations_parabolic::BloodFlowEquations1DOrd2) return flux_inner end # Dirichlet and Neumann boundary conditions for use with parabolic solvers in weak form. # Note that these are general, so they apply to LaplaceDiffusion in any spatial dimension. 
@inline function (boundary_condition::Trixi.BoundaryConditionDirichlet)(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Gradient, equations_parabolic::BloodFlowEquations1DOrd2) return boundary_condition.boundary_value_function(x, t, equations_parabolic) end @inline function (boundary_condition::Trixi.BoundaryConditionDirichlet)(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Divergence, equations_parabolic::BloodFlowEquations1DOrd2) return flux_inner end @inline function (boundary_condition::Trixi.BoundaryConditionNeumann)(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Divergence, equations_parabolic::BloodFlowEquations1DOrd2) return boundary_condition.boundary_normal_flux_function(x, t, equations_parabolic) end @inline function (boundary_condition::Trixi.BoundaryConditionNeumann)(flux_inner, u_inner, orientation_or_normal,direction, x, t, operator_type::Trixi.Gradient, equations_parabolic::BloodFlowEquations1DOrd2) return flux_inner end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/Test_Cases/pressure_in.jl
.jl
3,618
106
@doc raw""" initial_condition_simple(x, t, eq::BloodFlowEquations1D; R0=2.0) Generates a simple initial condition with a specified initial radius `R0`. ### Parameters - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. - `R0`: Initial radius (default: `2.0`). ### Returns State vector with zero initial area perturbation, zero flow rate, constant elasticity modulus, and reference area computed as `A_0 = \pi R_0^2`. This initial condition is suitable for basic tests without complex dynamics. """ function initial_condition_simple(x, t, eq::BloodFlowEquations1D; R0=2.0) T = eltype(x) A0 = T(pi * R0^2) Q = T(0.0) E = T(1e7) return SVector(zero(T), Q, E, A0) end @doc raw""" source_term_simple(u, x, t, eq::BloodFlowEquations1D) Computes a simple source term for the blood flow model, focusing on frictional effects. ### Parameters - `u`: State vector containing area perturbation, flow rate, elasticity modulus, and reference area. - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. ### Returns Source terms vector where: - `s_1 = 0` (no source for area perturbation). - `s_2` represents the friction term given by `s_2 = \frac{2 \pi k Q}{R A}`. Friction coefficient `k` is computed using the `friction` function, and the radius `R` is obtained using the `radius` function. """ function source_term_simple(u, x, t, eq::BloodFlowEquations1D) T = eltype(u) a, Q, _, A0 = u A = a + A0 s1 = zero(T) k = friction(u, x, eq) R = radius(u, eq) s2 = T(2 * pi * k * R * Q / A) return SVector(s1, s2, 0, 0) end @doc raw""" boundary_condition_pressure_in(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) Implements a pressure inflow boundary condition where the inflow pressure varies with time. ### Parameters - `u_inner`: State vector inside the domain near the boundary. - `orientation_or_normal`: Normal orientation of the boundary. 
- `direction`: Integer indicating the boundary direction. - `x`: Position vector. - `t`: Time scalar. - `surface_flux_function`: Function to compute flux at the boundary. - `eq`: Instance of `BloodFlowEquations1D`. ### Returns Computed boundary flux with inflow pressure specified by: ```math P_{in} = \begin{cases} 2 \times 10^4 \sin^2(\pi t / 0.125) & \text{if } t < 0.125 \\ 0 & \text{otherwise} \end{cases} ``` The corresponding inflow area `A_{in}` is computed using the inverse pressure relation, and the boundary state is constructed accordingly. """ function boundary_condition_pressure_in(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) Pin = ifelse(t < 0.125, 2e4 * sinpi(t / 0.125)^2, 0.0) Ain = inv_pressure(Pin, u_inner, eq) A0in = u_inner[4] ain = Ain - A0in u_boundary = SVector( ain, u_inner[2], u_inner[3], u_inner[4] ) # calculate the boundary flux if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary flux1 = surface_flux_function[1](u_inner, u_boundary, orientation_or_normal, eq) flux2 = surface_flux_function[2](u_inner, u_boundary, orientation_or_normal, eq) else # u_boundary is "left" of boundary, u_inner is "right" of boundary flux1 = surface_flux_function[1](u_boundary, u_inner, orientation_or_normal,eq) flux2 = surface_flux_function[2](u_boundary, u_inner, orientation_or_normal,eq) end return flux1,flux2 end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
src/1DModel/Test_Cases/convergence_test.jl
.jl
2,354
64
@doc raw""" initial_condition_convergence_test(x, t, eq::BloodFlowEquations1D) Generates a smooth initial condition for convergence tests of the blood flow equations. ### Parameters - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. ### Returns Initial condition state vector with zero initial area perturbation, sinusoidal flow rate, a constant elasticity modulus, and reference area. ### Details The returned initial condition has: - Zero perturbation in area (`a = 0`). - A sinusoidal flow rate given by `Q = sin(\pi x t)`. - A constant elasticity modulus `E`. - A reference cross-sectional area `A_0 = \pi R_0^2` for `R_0 = 1`. This initial condition can be used to verify the accuracy and stability of numerical solvers. """ function Trixi.initial_condition_convergence_test(x, t, eq::BloodFlowEquations1D) T = eltype(x) R0 = T(1.0) A0 = T(pi) * R0^2 E = T(1e7) Q = T(sinpi(x[1] * t)) return SVector(zero(T), Q, E, A0) end @doc raw""" source_terms_convergence_test(u, x, t, eq::BloodFlowEquations1D) Computes the source terms for convergence tests of the blood flow equations. ### Parameters - `u`: State vector containing area perturbation, flow rate, elasticity modulus, and reference area. - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. ### Returns Source terms vector. ### Details The source terms are derived based on the smooth initial condition and friction effects: - `s_1` represents the source term for area perturbation and is given by `s_1 = \pi t \cos(\pi x t)`. - `s_2` represents the source term for the flow rate and includes contributions from spatial and temporal variations as well as friction effects. The radius `R` is computed using the `radius` function, and the friction coefficient `k` is obtained using the `friction` function. This function is useful for evaluating the correctness of source term handling in numerical solvers. 
""" function Trixi.source_terms_convergence_test(u, x, t, eq::BloodFlowEquations1D) T = eltype(u) A0 = u[4] s1 = pi * t * cospi(x[1] * t) |> T # k = friction(u, x, eq) # R = radius(u, eq) s2 = pi * x[1] * cospi(x[1] * t) + pi * t * cospi(x[1] * t) * sinpi(x[1] * t) / A0 return SVector(s1, s2, 0, 0) end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
ext/BloodFlowTrixiDataInterpolationsExt.jl
.jl
1,637
41
module BloodFlowTrixiDataInterpolationsExt if isdefined(Base, :get_extension) using BloodFlowTrixi using DataInterpolations else using ..BloodFlowTrixi using ..DataInterpolations end using StaticArrays, LinearAlgebra using ForwardDiff function BloodFlowTrixi.interpolate_curve(curve_data::AbstractArray) N = length(curve_data) quadinterp = QuadraticSpline(curve_data,range(0,1,N)) curve = SmoothArcLengthInterpolation(quadinterp;m=N,in_place=false) return curve end function BloodFlowTrixi.get3DData(eq::BloodFlowEquations2D,curve_data::AbstractArray,semi,sol,time_index ::Int = 1;vtk ::Bool=false,out ::T="./datas") where {T<:AbstractString} curve = interpolate_curve(curve_data) tanj(s) = ForwardDiff.derivative(curve,s) function nor(s) res= ForwardDiff.derivative(tanj,s) n = norm(res) if n ≈ 0 a,b,c = tanj(s) # return a any normal vector if a != 0 return SA[-b,a,0]/sqrt(a^2+b^2) elseif b != 0 return SA[-b,a,0]/sqrt(a^2+b^2) else return SA[-sign(c),0,0] end end return res/n end ∧(v,w) = SA[v[2]*w[3]-v[3]*w[2],v[3]*w[1]-v[1]*w[3],v[1]*w[2]-v[2]*w[1]] er(theta,s) = cos(theta).*nor(s) .+ sin(theta).*∧(tanj(s),nor(s)) return BloodFlowTrixi.get3DData(eq,s->curve(s),er,semi,sol,time_index;vtk=vtk,out=out) end export get3DData,interpolate_curve end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
test/runtests.jl
.jl
359
14
using BloodFlowTrixi using Test @testset "BloodFlowTrixi.jl" begin include("./Aqua/aquatest.jl") include("./Extensions/DataInterpolationsTest.jl") @testset "1D Blood Flow Model" begin include("../exemples/Model1D/exemple.jl") end @testset "2D Blood Flow Model" begin include("../exemples/Model2D/exemple.jl") end end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
test/Extensions/DataInterpolationsTest.jl
.jl
137
6
using DataInterpolations @testset "2D Blood Flow Model with interpolation" begin include("../../exemples/Model2D/diexemple.jl") end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
test/Aqua/aquatest.jl
.jl
112
6
using Aqua @testset "Code quality (Aqua.jl)" begin Aqua.test_all(BloodFlowTrixi; ambiguities = false,) end
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
docs/make.jl
.jl
623
26
using BloodFlowTrixi using Documenter DocMeta.setdocmeta!(BloodFlowTrixi, :DocTestSetup, :(using BloodFlowTrixi); recursive=true) makedocs(; modules=[BloodFlowTrixi], authors="yolhan83 <yolhan@laposte.net>", sitename="BloodFlowTrixi.jl", format=Documenter.HTML(; canonical="https://github.com/JuliaHealth/BloodFlowTrixi.jl", edit_link="master", assets=String[], ), pages=[ "Home" => "index.md", "Tutorial" => "tuto.md", "Mathematics" => "math.md" ], ) deploydocs(; repo="github.com/JuliaHealth/BloodFlowTrixi.jl", devbranch="master", )
Julia
3D
JuliaHealth/BloodFlowTrixi.jl
docs/src/index.md
.md
4,372
89
```@meta CurrentModule = BloodFlowTrixi ``` # BloodFlowTrixi.jl **BloodFlowTrixi.jl** is a Julia package that implements one-dimensional (1D) and two-dimensional (2D) blood flow models for arterial circulation. These models are derived from the Navier-Stokes equations and were developed as part of my PhD research in applied mathematics, focusing on cardiovascular pathologies such as aneurysms and stenoses. ## Description This package provides: - **1D Blood Flow Model**: This model describes blood flow along compliant arteries in a single spatial dimension. It was derived under the assumption of axisymmetric flow and accounts for arterial compliance, inertia, and frictional losses. More details about this model can be found in my corresponding publication: **[[Article 1D](https://hal.science/hal-04676130v1)]** ```math \left\{\begin{aligned} \frac{\partial a}{\partial t} + \frac{\partial}{\partial x}(Q) &= 0 \\ \frac{\partial Q}{\partial t} + \frac{\partial}{\partial x}\left(\frac{Q^2}{A} + A P(a)\right) &= P(a) \frac{\partial A}{\partial x} - 2 \pi R k \frac Q {A}\\ P(a) &= P_{ext} + \frac{Eh\sqrt{\pi}}{1-\xi^2}\frac{\sqrt{A} - \sqrt{A_0}}{A_0} \\ R &= \sqrt{\frac{A}{\pi}} \end{aligned}\right. ``` - **2D Blood Flow Model**: The 2D model extends the Navier-Stokes equations under the thin-artery assumption, allowing for simulations in complex arterial geometries using curvilinear coordinates. It captures both longitudinal and angular dynamics, making it more accurate than classical 1D models while being less computationally expensive than full 3D models. 
This model is described in detail in: **[[Article 2D](https://hal.science/hal-04700161v1)]** ```math \left\{\begin{aligned} \frac{\partial a}{\partial t} + \frac{\partial}{\partial \theta}\left( \frac{Q_{R\theta}}{A} \right) + \frac{\partial}{\partial s}(Q_s) &= 0 \\ \frac{\partial Q_{R\theta}}{\partial t} + \frac{\partial}{\partial \theta}\left(\frac{Q_{R\theta}^2}{2A^2} + A P(a)\right) + \frac{\partial}{\partial s}\left( \frac{Q_{R\theta}Q_s}{A} \right) &= P(a) \frac{\partial A}{\partial \theta} - 2 R k \frac{Q_{R\theta}}{A} + \frac{2R}{3} \mathcal{C}\sin \theta \frac{Q_s^2}{A} \\ \frac{\partial Q_{s}}{\partial t} + \frac{\partial}{\partial \theta}\left(\frac{Q_{R\theta} Q_s}{A^2} \right) + \frac{\partial}{\partial s}\left( \frac{Q_s^2}{A} - \frac{Q_{R\theta}^2}{2A^2} + A P(a) \right) &= P(a) \frac{\partial A}{\partial s} - R k \frac{Q_s}{A} - \frac{2R}{3} \mathcal{C}\sin \theta \frac{Q_s Q_{R\theta}}{A^2} \\ P(a) &= P_{ext} + \frac{Eh}{\sqrt{2}\left(1-\xi^2\right)}\frac{\sqrt{A} - \sqrt{A_0}}{A_0} \\ R &= \sqrt{2A} \end{aligned}\right. ``` Both models were designed to be used with **[Trixi.jl](https://github.com/trixi-framework/Trixi.jl)**, a flexible and high-performance framework for solving systems of conservation laws using the Discontinuous Galerkin (DG) method. ## Features - **1D and 2D models** for arterial blood flow. - Derived from the Navier-Stokes equations with appropriate assumptions for compliant arteries. - To be used with **Trixi.jl** for DG-based numerical simulations. - Support for curvilinear geometries and compliant wall dynamics. ## Installation To install **BloodFlowTrixi.jl**, use the following commands in Julia: ```bash julia> ] pkg> add Trixi pkg> add BloodFlowTrixi ``` ## Future Plans **short term** - Add second order 1D model. - Design prim variables for 1D and 2D models. - Add proper tests for 1D and 2D models. - Add 3D representations of the solutions for 1D and 2D models. 
- Design easy to use interfaces for users to define their own initial and boundary conditions and source terms. **long term** - Add 3D fluid-structure interaction models for complex arterial geometries. - Design support for artery networks and simulate vascular networks using the 2D and 1D model. - Autodiff support for 1D and 2D models for parameter optimization. ## License This package is licensed under the MIT license. ## Acknowledgments This package was developed as part of my PhD research in applied mathematics, focusing on mathematical modeling and numerical simulation of blood flow in arteries. Special thanks to the developers of **Trixi.jl**, whose framework was invaluable in implementing and testing these models. ```@index ``` ```@autodocs Modules = [BloodFlowTrixi] ```
Markdown
3D
JuliaHealth/BloodFlowTrixi.jl
docs/src/math.md
.md
3,997
89
# 1D and 2D Mathematical Models for Blood Flow ## 1D Model The 1D model is based on a **cross-sectional integration of the Navier-Stokes equations** under the assumption of **incompressible flow** in thin arteries. This model is particularly suitable for global studies of the arterial network, where the geometry is approximately linear or weakly curved. ### Assumptions and Simplifications - The flow is considered **incompressible**. - The artery is modeled as a cylindrical tube with a cross-section varying with pressure. ### Main Equations The derived equations form a system of **hyperbolic partial differential equations** describing mass and momentum conservation: 1. **Mass conservation**: ```math ∂_t A + ∂_x Q = 0 ``` 2. **Momentum conservation**: ```math ∂_t Q + ∂_x \left( \frac{Q^2}{A} + \frac{1}{\rho} A P(A, x) \right) - \partial_x \left( 3\nu A \partial_x\left(\frac{Q}{A}\right) \right) = \frac{1}{\rho} P(A, x) ∂_x A - \frac{2\pi R K}{1-\frac{Rk}{4\nu}} \frac{Q}{A} ``` ### Energy and Entropy Relation of the 1D Model The energy associated with the system is given by: ```math E(t, x) = \frac{A u_x^2}{2} + \frac{1}{\rho} A P(A, x) - \frac{\beta(x)}{3 \rho A_0(x)} A^{3/2} ``` The entropy relation verified by this energy is: ```math ∂_t E + ∂_x \left( \left( E + \frac{\beta(x)}{3 \rho A_0(x)} A^{3/2} \right) u_x \right) = ∂_x \left( 3 \nu A ∂_x \left( \frac{Q}{A} \right) \right) u_x + \frac{2 \pi R k}{1 - R k / 4 \nu} u_x^2 ≤ 0 ``` Under null boundary conditions: ```math ∂_t \left( \int_0^L E \, dx \right) = - 3 \nu \int_0^L A (∂_x u_x)^2 \, dx - \frac{2 \pi R k}{1 - R k / 4 \nu} \int_0^L u_x^2 \, dx < 0 ``` --- ## 2D Model The 2D model is derived from a **radial integration of the Navier-Stokes equations**, enabling better representation of local effects in complex geometric configurations, such as **arterial bifurcations** and **severe aneurysms**. ### Assumptions and Simplifications - The flow is assumed **incompressible**. 
- The artery geometry is described using a curvilinear coordinate system (\( s, \theta \)). - The velocity profile is obtained without relying on a specific ansatz. ### Main Equations 1. **Mass conservation**: ```math ∂_t A + ∂_θ \left( \frac{Q_{Rθ}}{A} \right) + ∂_s(Q_s) = 0 ``` 2. **Momentum conservation (radial and axial components)**: ```math ∂_t (Q_{Rθ}) + ∂_θ \left( \frac{Q_{Rθ}^2}{2 A^2} + A P \right) + ∂_s \left( \frac{Q_{Rθ} Q_s}{A} \right) = \frac{2 R}{3} C \sin θ \frac{Q_s^2}{A} + \frac{2 R k Q_{Rθ}}{A} + P∂_θ (A) ``` ```math ∂_t (Q_s) + ∂_θ \left( \frac{Q_s Q_{Rθ}}{A^2} \right) + ∂_s \left( \frac{Q_s^2}{A} - \frac{Q_{Rθ}^2}{2 A^2} + A P \right) = - \frac{2 R}{3} C \sin θ \frac{Q_{Rθ} Q_s}{A^2} + \frac{k R Q_s}{A} + P∂_s (A) ``` ### Energy and Entropy Relation of the 2D Model The energy associated with the system is given by: ```math E(t, θ, s) = A \left( \frac{9}{8} u_θ^2 + \frac{u_s^2}{2} + p \right) - \tilde{p} ``` The corresponding entropy relation is: ```math ∂_t E + ∂_θ \left( \frac{3}{2} \frac{u_θ}{R} \left( E + \tilde{p} - \frac{9}{16} A u_θ^2 \right) \right) + ∂_s \left( u_s \left( E + \tilde{p} - \frac{9}{16} A u_θ^2 \right) \right) = \frac{9}{4} R k u_θ^2 + k R u_s^2 ≤ 0 ``` This relation ensures that the energy locally decreases over time, guaranteeing the **stability** of the model. --- ## Comparison of 1D and 2D Models - **1D Model**: - Fast and efficient for global simulations of large arterial networks. - Well-suited for simple or weakly curved geometries. - Very low computational cost. - **2D Model**: - More accurate for complex geometries (bifurcations, aneurysms). - Better captures local effects and fluid-structure interactions. - Moderate computational cost compared to three-dimensional models (3D NS-FSI). The combined use of these two models provides an **efficient alternative to 3D simulations**, offering a good compromise between accuracy and computational cost.
Markdown
3D
JuliaHealth/BloodFlowTrixi.jl
docs/src/tuto.md
.md
22,644
602
## Tutorial for the 1D model In this section, we describe how to use **BloodFlowTrixi.jl** with **Trixi.jl**. This tutorial will guide you through setting up and running a 1D blood flow simulation, including mesh creation, boundary conditions, numerical fluxes, and visualization of results. ### Packages Before starting, ensure that the required packages are loaded: ```julia using Trixi using BloodFlowTrixi using OrdinaryDiffEq using Plots ``` First, we need to choose the equation that describes the blood flow dynamics: ```julia eq = BloodFlowEquations1D(; h=0.1) ``` Here, `h` represents a parameter related to the initial condition or model scaling. ### Mesh and boundary conditions We begin by defining a one-dimensional Tree mesh, which discretizes the spatial domain: ```julia mesh = TreeMesh(0.0, 40.0, initial_refinement_level=6, n_cells_max=10^4, periodicity=false) ``` This generates a non-periodic mesh for the interval $[0, 40]$, with $2^{initialRefinementLevel+1}-1$ cells. The parameter `initial_refinement_level` controls the initial number of cells, while `n_cells_max` specifies the maximum number of cells allowed during mesh refinement. In **Trixi.jl**, the Tree mesh has two labeled boundaries: **x_neg** (left boundary) and **x_pos** (right boundary). These labels are used to apply boundary conditions: ```julia bc = ( x_neg = boundary_condition_pressure_in, x_pos = Trixi.BoundaryConditionDoNothing() ) ``` - `boundary_condition_pressure_in` applies a pressure inflow condition at the left boundary. - `Trixi.BoundaryConditionDoNothing()` specifies a "do nothing" boundary condition at the right boundary, meaning no flux is imposed. #### Boundary condition implementation The inflow boundary condition is defined as: ```julia boundary_condition_pressure_in(u_inner, orientation_or_normal, direction, x, t, surface_flux_function, eq::BloodFlowEquations1D) ``` This function applies a time-dependent pressure inflow condition. 
#### Parameters - `u_inner`: State vector inside the domain near the boundary. - `orientation_or_normal`: Normal orientation of the boundary. - `direction`: Integer indicating the boundary direction. - `x`: Position vector. - `t`: Time scalar. - `surface_flux_function`: Function to compute flux at the boundary. - `eq`: Instance of `BloodFlowEquations1D`. #### Returns The boundary flux is computed based on the inflow pressure: ```math P_{\text{in}} = \begin{cases} 2 \times 10^4 \sin^2\left(\frac{\pi t}{0.125}\right) & \text{if } t < 0.125 \\ 0 & \text{otherwise} \end{cases} ``` This time-dependent inflow pressure mimics a pulsatile flow, typical in arterial blood flow. The inflow area $A_{in}$ is determined using the inverse pressure relation, ensuring consistency with the physical model. ### Numerical flux To compute fluxes at cell interfaces, we use a combination of conservative and non-conservative fluxes: ```julia volume_flux = (flux_lax_friedrichs, flux_nonconservative) surface_flux = (flux_lax_friedrichs, flux_nonconservative) ``` - `flux_lax_friedrichs` is a standard numerical flux for hyperbolic conservation laws. - `flux_nonconservative` handles the non-conservative terms in the model, particularly those related to pressure discontinuities. The non-conservative flux function is defined as: ```julia flux_nonconservative(u_ll, u_rr, orientation::Integer, eq::BloodFlowEquations1D) ``` #### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `orientation::Integer`: Orientation index. - `eq`: Instance of `BloodFlowEquations1D`. #### Returns The function returns the non-conservative flux vector, which is essential for capturing sharp pressure changes in the simulation. 
### Basis functions and Shock Capturing DG scheme To approximate the solution, we use polynomial basis functions: ```julia basis = LobattoLegendreBasis(2) ``` This defines a **Lobatto-Legendre** basis of polynomial degree $2$, which is commonly used in high-order methods like Discontinuous Galerkin (DG) schemes. We then define an indicator for shock capturing, focusing on the first variable (area perturbation `a`): ```julia id = IndicatorHennemannGassner(eq, basis; variable=first) ``` This indicator helps detect shocks or discontinuities in the solution and applies appropriate stabilization. The solver is defined as: ```julia vol = VolumeIntegralShockCapturingHG(id, volume_flux_dg=volume_flux, volume_flux_fv=surface_flux) solver = DGSEM(basis, surface_flux, vol) ``` Here, `DGSEM` represents the Discontinuous Galerkin Spectral Element Method, a high-order accurate scheme suitable for hyperbolic problems. ### Semi-discretization We are now ready to semi-discretize the problem: ```julia semi = SemidiscretizationHyperbolic( mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions=bc ) ``` This step sets up the semi-discretized form of the PDE, which will be advanced in time using an ODE solver. #### Source term The source term accounts for additional forces acting on the blood flow, such as friction: ```julia source_term_simple(u, x, t, eq::BloodFlowEquations1D) ``` #### Parameters - `u`: State vector containing area perturbation, flow rate, elasticity modulus, and reference area. - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. #### Returns The source term vector is given by: - $s_1 = 0$ (no source for area perturbation). - $s_2 = \frac{2 \pi k Q}{R A}$, representing frictional effects. The friction coefficient $k$ is computed using a model-specific `friction` function, and the radius $R$ is obtained from the state vector using the `radius` function. 
### Initial condition The initial condition specifies the starting state of the simulation: ```julia initial_condition_simple(x, t, eq::BloodFlowEquations1D; R0=2.0) ``` This function generates a simple initial condition with a uniform radius `R0`. #### Parameters - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations1D`: Instance of the blood flow model. - `R0`: Initial radius (default: `2.0`). #### Returns The function returns a state vector with: - Zero initial area perturbation. - Zero initial flow rate. - Constant elasticity modulus. - Reference area $A_0 = \pi R_0^2$. This simple initial condition is suitable for testing the model without introducing complex dynamics. ### Run the simulation First, we discretize the problem in time: ```julia Trixi.default_analysis_integrals(::BloodFlowEquations1D) = () tspan = (0.0, 0.5) ode = semidiscretize(semi, tspan) ``` Here, `tspan` defines the time interval for the simulation. Next, we add some callbacks to monitor the simulation: ```julia summary_callback = SummaryCallback() analysis_callback = AnalysisCallback(semi, interval=200) stepsize_callback = StepsizeCallback(; cfl=0.5) callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback) ``` - `SummaryCallback` provides a summary of the simulation progress. - `AnalysisCallback` computes analysis metrics at specified intervals. - `StepsizeCallback` adjusts the time step based on the CFL condition. Finally, we solve the problem: ```julia dt = stepsize_callback(ode) sol = solve(ode, SSPRK33(), dt=dt, dtmax=1e-4, dtmin=1e-11, save_everystep=false, saveat=0.002, callback=callbacks) ``` Here, `SSPRK33()` is a third-order Strong Stability Preserving Runge-Kutta method, suitable for hyperbolic PDEs. 
### Plot the results The results can be visualized using the following code: ```julia @gif for i in eachindex(sol) a1 = sol[i][1:4:end] Q1 = sol[i][2:4:end] A01 = sol[i][4:4:end] A1 = A01 .+ a1 plot(Q1 ./ A1, lw=4, color=:red, ylim=(-10, 50), label="velocity", legend=:bottomleft) end ``` This code generates an animated GIF showing the evolution of the velocity profile over time. The velocity is computed as $Q/A$, where $Q$ is the flow rate, and $A$ is the cross-sectional area. ### Plain code ```julia using Trixi using BloodFlowTrixi using OrdinaryDiffEq,Plots eq = BloodFlowEquations1D(;h=0.1) mesh = TreeMesh(0.0,40.0,initial_refinement_level=6,n_cells_max=10^4,periodicity=false) bc = ( x_neg = boundary_condition_pressure_in, x_pos = Trixi.BoundaryConditionDoNothing() ) volume_flux = (flux_lax_friedrichs,flux_nonconservative) surface_flux = (flux_lax_friedrichs,flux_nonconservative) basis = LobattoLegendreBasis(2) id = IndicatorHennemannGassner(eq,basis;variable=first) vol = VolumeIntegralShockCapturingHG(id,volume_flux_dg=volume_flux,volume_flux_fv=surface_flux) solver = DGSEM(basis,surface_flux,vol) semi = SemidiscretizationHyperbolic(mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions=bc) Trixi.default_analysis_integrals(::BloodFlowEquations1D) = () tspan = (0.0, 0.5) ode = semidiscretize(semi, tspan) summary_callback = SummaryCallback() analysis_callback = AnalysisCallback(semi, interval = 200) stepsize_callback = StepsizeCallback(; cfl=0.5) callbacks = CallbackSet(summary_callback,analysis_callback,stepsize_callback) dt = stepsize_callback(ode) sol = solve(ode, SSPRK33(), dt = dt, dtmax = 1e-4,dtmin = 1e-11, save_everystep = false,saveat = 0.002, callback = callbacks) @gif for i in eachindex(sol) a1 = sol[i][1:4:end] Q1 = sol[i][2:4:end] A01 = sol[i][4:4:end] A1 = A01.+a1 plot(Q1./A1,lw=4,color=:red,ylim=(-10,50),label="velocity",legend=:bottomleft) end ``` ![Alt Text](./graph.gif) ## Tutorial for the 2D model In 
this section, we describe how to use **BloodFlowTrixi.jl** with **Trixi.jl**. This tutorial will guide you through setting up and running a 2D blood flow simulation, including mesh creation, boundary conditions, numerical fluxes, and visualization of results. ### Packages Before starting, ensure that the required packages are loaded: ```julia using Trixi using BloodFlowTrixi using OrdinaryDiffEq using Plots ``` First, we need to choose the equation that describes the blood flow dynamics: ```julia eq = BloodFlowEquations2D(; h=0.1) ``` Here, `h` represents a parameter related to the initial condition or model scaling. ### Mesh and boundary conditions We begin by defining a two-dimensional P4est mesh, which discretizes the spatial domain: ```julia mesh = P4estMesh( (2,4), polydeg= 2, coordinates_min =(0.0,0.0), coordinates_max = (2*pi,40.0), initial_refinement_level = 4, periodicity = (true, false) ) ``` This generates a non-periodic mesh for the domain $[0,2\pi] \times [0, 40]$, with $2\times 4\times 4^{\text{initialRefinementLevel}}$ cells. In **Trixi.jl**, the P4est mesh has four labeled boundaries: **x_neg** (left boundary), **x_pos** (right boundary), **y_neg** (bottom boundary), and **y_pos** (top boundary). These labels are used to apply boundary conditions: ```julia bc = Dict( :y_neg =>boundary_condition_pressure_in, :y_pos => Trixi.BoundaryConditionDoNothing() ) ``` - `boundary_condition_pressure_in` applies a pressure inflow condition at the bottom boundary. - `Trixi.BoundaryConditionDoNothing()` specifies a "do nothing" boundary condition at the right boundary, meaning no flux is imposed. #### Boundary condition implementation The inflow boundary condition is defined as: ```julia boundary_condition_pressure_in(u_inner, normal, x, t, surface_flux_function, eq::BloodFlowEquations2D) ``` This function applies a time-dependent pressure inflow condition. #### Parameters - `u_inner`: State vector inside the domain near the boundary. 
- `normal`: Normal of the boundary. - `x`: Position vector. - `t`: Time scalar. - `surface_flux_function`: Function to compute flux at the boundary. - `eq`: Instance of `BloodFlowEquations2D`. #### Returns The boundary flux is computed based on the inflow pressure: ```math P_{\text{in}} = \begin{cases} 2 \times 10^4 \sin^2\left(\frac{\pi t}{0.125}\right) & \text{if } t < 0.125 \\ 0 & \text{otherwise} \end{cases} ``` This time-dependent inflow pressure mimics a pulsatile flow, typical in arterial blood flow. The inflow area $A_{in}$ is determined using the inverse pressure relation, ensuring consistency with the physical model. ### Numerical flux To compute fluxes at cell interfaces, we use a combination of conservative and non-conservative fluxes: ```julia volume_flux = (flux_lax_friedrichs, flux_nonconservative) surface_flux = (flux_lax_friedrichs, flux_nonconservative) ``` - `flux_lax_friedrichs` is a standard numerical flux for hyperbolic conservation laws. - `flux_nonconservative` handles the non-conservative terms in the model, particularly those related to pressure discontinuities. The non-conservative flux function is defined as: ```julia flux_nonconservative(u_ll, u_rr, normal::Integer, eq::BloodFlowEquations2D) ``` #### Parameters - `u_ll`: Left state vector. - `u_rr`: Right state vector. - `normal`: normal vector. - `eq`: Instance of `BloodFlowEquations2D`. #### Returns The function returns the non-conservative flux vector, which is essential for capturing sharp pressure changes in the simulation. ### Basis functions and Shock Capturing DG scheme To approximate the solution, we use polynomial basis functions: ```julia basis = LobattoLegendreBasis(2) ``` This defines a **Lobatto-Legendre** basis of polynomial degree $2$, which is commonly used in high-order methods like Discontinuous Galerkin (DG) schemes. 
We then define an indicator for shock capturing, focusing on the first variable (area perturbation `$a$`): ```julia id = IndicatorHennemannGassner(eq, basis; variable=first) ``` This indicator helps detect shocks or discontinuities in the solution and applies appropriate stabilization. The solver is defined as: ```julia vol = VolumeIntegralShockCapturingHG(id, volume_flux_dg=volume_flux, volume_flux_fv=surface_flux) solver = DGSEM(basis, surface_flux, vol) ``` Here, `DGSEM` represents the Discontinuous Galerkin Spectral Element Method, a high-order accurate scheme suitable for hyperbolic problems. ### Semi-discretization We are now ready to semi-discretize the problem: ```julia semi = SemidiscretizationHyperbolic( mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions=bc ) ``` This step sets up the semi-discretized form of the PDE, which will be advanced in time using an ODE solver. #### Source term The source term accounts for additional forces acting on the blood flow, such as friction: ```julia source_term_simple(u, x, t, eq::BloodFlowEquations2D) ``` #### Parameters - `u`: State vector containing area perturbation, flow rate, elasticity modulus, and reference area. - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations2D`: Instance of the blood flow model. #### Returns The source term vector is given by: - $s_1 = 0$ (no source for area perturbation). - $s_2 = \frac{2R}{3}\mathcal{C} \sin \theta \frac{Q_s^2}{A} + \frac{3Rk Q_{Rθ}}{A}$. - $s_3 = -\frac{2R}{3}\mathcal{C} \sin \theta \frac{Q_s Q_{R\theta}}{A} + \frac{RkQ_{s}}{A}$. The friction coefficient $k$ is computed using a model-specific `friction` function, and the radius $R$ is obtained from the state vector using the `radius` function. Also, the curvature $\mathcal{C}$ is computed using the `curvature` function and is equal to $1$ here. 
### Initial condition The initial condition specifies the starting state of the simulation: ```julia initial_condition_simple(x, t, eq::BloodFlowEquations2D; R0=2.0) ``` This function generates a simple initial condition with a uniform radius `R0`. #### Parameters - `x`: Position vector. - `t`: Time scalar. - `eq::BloodFlowEquations2D`: Instance of the blood flow model. - `R0`: Initial radius (default: `2.0`). #### Returns The function returns a state vector with: - Zero initial area perturbation. - Zero initial flow rate (in $\theta$ and $s$ directions). - Constant elasticity modulus. - Reference area $A_0 = \frac{R_0^2}2$. This simple initial condition is suitable for testing the model without introducing complex dynamics. ### Run the simulation First, we discretize the problem in time: ```julia Trixi.default_analysis_integrals(::BloodFlowEquations2D) = () tspan = (0.0, 0.3) ode = semidiscretize(semi, tspan) ``` Here, `tspan` defines the time interval for the simulation. Next, we add some callbacks to monitor the simulation: ```julia summary_callback = SummaryCallback() analysis_callback = AnalysisCallback(semi, interval=200) stepsize_callback = StepsizeCallback(; cfl=0.5) callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback) ``` - `SummaryCallback` provides a summary of the simulation progress. - `AnalysisCallback` computes analysis metrics at specified intervals. - `StepsizeCallback` adjusts the time step based on the CFL condition. Finally, we solve the problem: ```julia dt = stepsize_callback(ode) sol = solve(ode, SSPRK33(), dt=dt, dtmax=1e-4, dtmin=1e-11, save_everystep=false, saveat=0.003, callback=callbacks) ``` Here, `SSPRK33()` is a third-order Strong Stability Preserving Runge-Kutta method, suitable for hyperbolic PDEs. 
### Plot the results

The results can be visualized using the following code:

```julia
@gif for i in eachindex(sol)
    pd = PlotData2D(sol[i],semi,solution_variables=cons2prim)
    plt1 = Plots.plot(pd["A"],aspect_ratio=0.2)
    plt2 = Plots.plot(pd["wtheta"],aspect_ratio=0.2)
    plt3 = Plots.plot(pd["ws"],aspect_ratio=0.2)
    plt4 = Plots.plot(pd["P"],aspect_ratio=0.2)
    plot(plt1,plt2,plt3,plt4,layout=(2,2))
end
```

This code generates an animated GIF showing the evolution of the cross-sectional area `A`, the velocity components `wtheta` and `ws`, and the pressure `P` over time, arranged in a $2\times 2$ layout.

### Plain code

```julia
using Trixi
using BloodFlowTrixi
using OrdinaryDiffEq,Plots
eq = BloodFlowEquations2D(;h=0.1)
mesh = P4estMesh(
    (2,4),
    polydeg= 2,
    coordinates_min =(0.0,0.0),
    coordinates_max = (2*pi,40.0),
    initial_refinement_level = 4,
    periodicity = (true, false)
)
bc = Dict(
    :y_neg =>boundary_condition_pressure_in,
    :y_pos => Trixi.BoundaryConditionDoNothing()
)
volume_flux = (flux_lax_friedrichs,flux_nonconservative)
surface_flux = (flux_lax_friedrichs,flux_nonconservative)
basis = LobattoLegendreBasis(2)
id = IndicatorHennemannGassner(eq,basis;variable=first)
vol = VolumeIntegralShockCapturingHG(id,volume_flux_dg = volume_flux,volume_flux_fv = surface_flux)
solver = DGSEM(basis,surface_flux,vol)
semi = SemidiscretizationHyperbolic(mesh, eq, initial_condition_simple, source_terms = source_term_simple, solver, boundary_conditions = bc)
Trixi.default_analysis_integrals(::BloodFlowEquations2D) = ()
tspan = (0.0, 0.3)
ode = semidiscretize(semi, tspan)
summary_callback = SummaryCallback()
analysis_callback = AliveCallback(analysis_interval=1000)
stepsize_callback = StepsizeCallback(; cfl=0.5)
callbacks = CallbackSet(summary_callback,analysis_callback,stepsize_callback)
dt = stepsize_callback(ode)
sol = solve(ode, SSPRK33(),dt=dt, dtmax = 1e-4,dtmin = 1e-12,save_everystep = false,saveat = 0.003, callback = callbacks)
@gif for i in eachindex(sol)
    pd =
PlotData2D(sol[i],semi,solution_variables=cons2prim)
    plt1 = Plots.plot(pd["A"],aspect_ratio=0.2)
    plt2 = Plots.plot(pd["wtheta"],aspect_ratio=0.2)
    plt3 = Plots.plot(pd["ws"],aspect_ratio=0.2)
    plt4 = Plots.plot(pd["P"],aspect_ratio=0.2)
    plot(plt1,plt2,plt3,plt4,layout=(2,2))
end
```

![Alt Text](./graph2d.gif)

## Reconstruction of 3D data

In this section, we describe how to reconstruct **3D data** from 1D and 2D blood flow simulations using **BloodFlowTrixi.jl**. This allows us to visualize the reconstructed vessel in a three-dimensional space and export the data for further analysis.

### Overview

The reconstruction functions take the simulation results from **1D** and **2D** blood flow models and generate **3D representations**. These representations are computed based on:

- The spatial position of the computational grid points.
- The computed area perturbation and reference area, used to determine the local vessel radius.
- Flow velocity components projected onto the 3D geometry.
- Pressure variations along the vessel.

The output can be stored in **VTK format**, which allows for visualization with **ParaView** or similar tools.

### Reconstruction from 1D Model

For the **1D case**, we assume the vessel follows a straight or predefined curve in 3D space. The reconstructed shape is obtained by rotating the cross-sectional area around a centerline.

#### Example Usage

```julia
out = "datas/"
if isdir(out)
    rm(out, recursive=true)
    mkdir(out)
end
for i in eachindex(sol)
    datas = get3DData(eq, semi, sol, i, vtk=true, out=out)
end
```

This script reconstructs the 3D vessel for each time step of the solution and saves the results in **VTK format**.

![Alt Text](./pressure_3d.gif)

### Reconstruction from 2D Model

For the **2D case**, the vessel can follow a more complex curved path. The reconstruction uses:

- **A centerline function** `curve(s)`, which gives the position of the vessel as a function of the axial coordinate.
- **A normal vector function** `nor(s)`, which defines the local orientation of the vessel. - **A binormal function** to complete the 3D frame. - **A radius function**, which computes the local vessel radius from the area values. #### Example Usage ```julia out = "datas/" if isdir(out) rm(out, recursive=true) mkdir(out) end curve(s) = SA[s/40, cospi(s/40), sinpi(s/40)] / sqrt(1/40^2 + (pi/40)^2) tanj(s) = SA[1/40, -pi/40*sinpi(s/40), pi/40*cospi(s/40)] / sqrt(1/40^2 + (pi/40)^2) nor(s) = SA[0, -cospi(s/40), -sinpi(s/40)] for i in eachindex(sol) datas = get3DData(eq, curve, tanj, nor, semi, sol, i, vtk=true, out=out) end ``` This script reconstructs the 3D vessel geometry using a predefined **curved centerline** and associated **tangent, normal, and binormal vectors**. ![Alt Text](./pressure_curve_3d.gif) ### Visualization Once the VTK files are generated, you can open them in **ParaView**: 1. Open **ParaView**. 2. Click **File > Open**, navigate to your `datas/` folder, and select a `.vtu` file. 3. Click **Apply** to visualize the reconstructed 3D vessel. You can apply filters like **Clip**, **Slice**, and **Glyph** to inspect different regions of the simulation. ### Summary - **1D reconstruction** assumes a straight centerline and rotates the cross-section to create a 3D vessel. - **2D reconstruction** follows a predefined curved centerline with tangent and normal vectors. - **VTK output** enables advanced visualization using **ParaView** or similar tools.
Markdown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/dcmHelpersCommon.cxx
.cxx
30,565
680
#include "dcmHelpersCommon.h" #include "dcmtk/config/osconfig.h" #include "dcmtk/dcmdata/dctk.h" #include "dcmtk/dcmsr/dsriodcc.h" #include "dcmtk/dcmsr/dsrdoc.h" #define WARN_IF_ERROR(X,M) X // List of tags copied from David Clunie's Pixelmed toolkit const DcmTagKey dcmHelpersCommon::patientModuleTags[] = { DCM_PatientName, DCM_PatientID, //Macro IssuerOfPatientIDMacro DCM_IssuerOfPatientID, DCM_IssuerOfPatientIDQualifiersSequence, //EndMacro IssuerOfPatientIDMacro DCM_PatientBirthDate, DCM_PatientSex, DCM_QualityControlSubject, DCM_PatientBirthTime, DCM_ReferencedPatientSequence, DCM_OtherPatientIDsSequence, DCM_OtherPatientNames, DCM_EthnicGroup, DCM_PatientComments, DCM_PatientSpeciesDescription, DCM_PatientSpeciesCodeSequence, DCM_PatientBreedDescription, DCM_PatientBreedCodeSequence, DCM_BreedRegistrationSequence, DCM_ResponsiblePerson, DCM_ResponsiblePersonRole, DCM_ResponsibleOrganization, DCM_PatientIdentityRemoved, DCM_DeidentificationMethod, DCM_DeidentificationMethodCodeSequence }; const DcmTagKey dcmHelpersCommon::clinicalTrialSubjectModuleTags[] = { DCM_ClinicalTrialSubjectID, DCM_ClinicalTrialSponsorName, DCM_ClinicalTrialProtocolID, DCM_ClinicalTrialProtocolName, DCM_ClinicalTrialSiteID, DCM_ClinicalTrialSiteName, DCM_ClinicalTrialSubjectID, DCM_ClinicalTrialSubjectReadingID }; const DcmTagKey dcmHelpersCommon::generalStudyModuleTags[] = { DCM_StudyInstanceUID, DCM_StudyDate, DCM_StudyTime, DCM_ReferringPhysicianName, DCM_ReferringPhysicianIdentificationSequence, DCM_StudyID, DCM_AccessionNumber, DCM_IssuerOfAccessionNumberSequence, DCM_StudyDescription, DCM_PhysiciansOfRecord, DCM_PhysiciansOfRecordIdentificationSequence, DCM_NameOfPhysiciansReadingStudy, DCM_PhysiciansReadingStudyIdentificationSequence, DCM_RequestingServiceCodeSequence, DCM_ReferencedStudySequence, DCM_ProcedureCodeSequence, DCM_ReasonForPerformedProcedureCodeSequence }; const DcmTagKey dcmHelpersCommon::patientStudyModuleTags[] = { DCM_AdmittingDiagnosesDescription, 
DCM_AdmittingDiagnosesCodeSequence, DCM_PatientAge, DCM_PatientSize, DCM_PatientWeight, DCM_PatientSizeCodeSequence, DCM_Occupation, DCM_AdditionalPatientHistory, DCM_AdmissionID, DCM_IssuerOfAdmissionIDSequence, DCM_ServiceEpisodeID, DCM_IssuerOfServiceEpisodeIDSequence, DCM_ServiceEpisodeDescription, DCM_PatientSexNeutered }; const DcmTagKey dcmHelpersCommon::generalSeriesModuleTags[] = { DCM_Modality, DCM_SeriesInstanceUID, DCM_SeriesNumber, DCM_Laterality, DCM_SeriesDate, DCM_SeriesTime, DCM_PerformingPhysicianName, DCM_PerformingPhysicianIdentificationSequence, DCM_ProtocolName, DCM_SeriesDescription, DCM_SeriesDescriptionCodeSequence, DCM_OperatorsName, DCM_OperatorIdentificationSequence, DCM_ReferencedPerformedProcedureStepSequence, DCM_RelatedSeriesSequence, DCM_BodyPartExamined, DCM_PatientPosition, //DCM_SmallestPixelValueInSeries, //DCM_LargestPixelValueInSeries, DCM_RequestAttributesSequence, //Macro PerformedProcedureStepSummaryMacro DCM_PerformedProcedureStepID, DCM_PerformedProcedureStepStartDate, DCM_PerformedProcedureStepStartTime, DCM_PerformedProcedureStepDescription, DCM_PerformedProtocolCodeSequence, DCM_CommentsOnThePerformedProcedureStep, //EndMacro PerformedProcedureStepSummaryMacro DCM_AnatomicalOrientationType }; const DcmTagKey dcmHelpersCommon::generalEquipmentModuleTags[] = { DCM_Manufacturer, DCM_InstitutionName, DCM_InstitutionAddress, DCM_StationName, DCM_InstitutionalDepartmentName, DCM_ManufacturerModelName, DCM_DeviceSerialNumber, DCM_SoftwareVersions, DCM_GantryID, DCM_SpatialResolution, DCM_DateOfLastCalibration, DCM_TimeOfLastCalibration, DCM_PixelPaddingValue }; const DcmTagKey dcmHelpersCommon::frameOfReferenceModuleTags[] = { DCM_FrameOfReferenceUID, DCM_PositionReferenceIndicator }; const DcmTagKey dcmHelpersCommon::sopCommonModuleTags[] = { DCM_SOPClassUID, DCM_SOPInstanceUID, //DCM_SpecificCharacterSet, DCM_InstanceCreationDate, DCM_InstanceCreationTime, DCM_InstanceCreatorUID, DCM_RelatedGeneralSOPClassUID, 
DCM_OriginalSpecializedSOPClassUID, DCM_CodingSchemeIdentificationSequence, DCM_TimezoneOffsetFromUTC, DCM_ContributingEquipmentSequence, DCM_InstanceNumber, DCM_SOPInstanceStatus, DCM_SOPAuthorizationDateTime, DCM_SOPAuthorizationComment, DCM_AuthorizationEquipmentCertificationNumber, //Macro DigitalSignaturesMacro //DCM_MACParametersSequence, //DCM_DigitalSignaturesSequence, //EndMacro DigitalSignaturesMacro //DCM_EncryptedAttributesSequence, DCM_OriginalAttributesSequence, DCM_HL7StructuredDocumentReferenceSequence }; const DcmTagKey dcmHelpersCommon::generalImageModuleTags[] = { DCM_ContentDate, DCM_ContentTime }; const DcmTagKey dcmHelpersCommon::srDocumentGeneralModuleTags[] = { DCM_ReferencedRequestSequence, // cw. RequestAttributesSequence in GeneralSeries DCM_PerformedProcedureCodeSequence // cw. ProcedureCodeSequence in GeneralStudy }; void dcmHelpersCommon::copyElement(const DcmTagKey tag, DcmDataset *src, DcmDataset *dest){ DcmElement *e; OFCondition cond; cond = src->findAndGetElement(tag, e, OFFalse, OFTrue); if(cond.good()){ cond = dest->insert(e, true); dest->findAndGetElement(tag,e); char *str; e->getString(str); if(str) std::cout << "Inserted: " << str << std::endl; } }; void dcmHelpersCommon::copyPatientModule(DcmDataset *src, DcmDataset *dest){ for(unsigned int i=0;i<sizeof(patientModuleTags)/sizeof(DcmTagKey);i++) dcmHelpersCommon::copyElement(patientModuleTags[i], src, dest); } void dcmHelpersCommon::copyPatientStudyModule(DcmDataset *src, DcmDataset *dest){ for(unsigned int i=0;i<sizeof(patientStudyModuleTags)/sizeof(DcmTagKey);i++) dcmHelpersCommon::copyElement(patientStudyModuleTags[i], src, dest); } void dcmHelpersCommon::copyGeneralStudyModule(DcmDataset *src, DcmDataset *dest){ for(unsigned int i=0;i<sizeof(generalStudyModuleTags)/sizeof(DcmTagKey);i++) dcmHelpersCommon::copyElement(generalStudyModuleTags[i], src, dest); } void dcmHelpersCommon::copyClinicalTrialSubjectModule(DcmDataset *src, DcmDataset *dest){ for(unsigned int 
i=0;i<sizeof(clinicalTrialSubjectModuleTags)/sizeof(DcmTagKey);i++) dcmHelpersCommon::copyElement(clinicalTrialSubjectModuleTags[i], src, dest); } /* void dcmHelpersCommon::findAndGetCodedValueFromSequenceItem(DcmItem *seq, DSRCodedEntryValue &codedEntry){ char *elementStr; std::string codeMeaning, codeValue, codingSchemeDesignator; DcmElement *element; if(seq->findAndGetElement(DCM_CodeValue, element).good()){ element->getString(elementStr); codeValue = std::string(elementStr); } if(seq->findAndGetElement(DCM_CodeMeaning, element).good()){ element->getString(elementStr); codeMeaning = std::string(elementStr); } if(seq->findAndGetElement(DCM_CodingSchemeDesignator, element).good()){ element->getString(elementStr); codingSchemeDesignator = std::string(elementStr); } codedEntry.setCode(codeValue.c_str(), codingSchemeDesignator.c_str(), codeMeaning.c_str()); } */ void dcmHelpersCommon::addLanguageOfContent(DSRDocument *doc){ doc->getTree().addContentItem(DSRTypes::RT_hasConceptMod, DSRTypes::VT_Code, DSRTypes::AM_belowCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121049", "DCM", "Language of Content Item and Descendants")); doc->getTree().getCurrentContentItem().setCodeValue( DSRCodedEntryValue("eng","RFC3066","English")); doc->getTree().addContentItem(DSRTypes::RT_hasConceptMod, DSRTypes::VT_Code, DSRTypes::AM_belowCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121046", "DCM", "Country of Language")); doc->getTree().getCurrentContentItem().setCodeValue( DSRCodedEntryValue("US","ISO3166_1","United States")); doc->getTree().goUp(); } void dcmHelpersCommon::addObservationContext(DSRDocument *doc){ dcmHelpersCommon::addObserverContext(doc); dcmHelpersCommon::addProcedureContext(doc); dcmHelpersCommon::addSubjectContext(doc); } // TODO: parameterize the actual values initialized void dcmHelpersCommon::addObserverContext(DSRDocument *doc, const char* deviceObserverUID, const char* 
deviceObserverName, const char* deviceObserverManufacturer, const char* deviceObserverModelName, const char* deviceObserverSerialNumber){ // TODO: TID 1001 Observation context doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_Code, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121005","DCM","Observer Type")); doc->getTree().getCurrentContentItem().setCodeValue( DSRCodedEntryValue("121007","DCM","Device")); // TODO: need to decide what UIDs we will use doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_UIDRef, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121012","DCM","Device Observer UID")); doc->getTree().getCurrentContentItem().setStringValue(deviceObserverUID); doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_Text, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121013","DCM","Device Observer Name")); doc->getTree().getCurrentContentItem().setStringValue(deviceObserverName); doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_Text, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121014","DCM","Device Observer Manufacturer")); doc->getTree().getCurrentContentItem().setStringValue(deviceObserverManufacturer); doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_Text, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121015","DCM","Device Observer Model Name")); doc->getTree().getCurrentContentItem().setStringValue(deviceObserverModelName); doc->getTree().addContentItem(DSRTypes::RT_hasObsContext, DSRTypes::VT_Text, DSRTypes::AM_afterCurrent); doc->getTree().getCurrentContentItem().setConceptName( DSRCodedEntryValue("121016","DCM","Device Observer Serial Number")); 
  doc->getTree().getCurrentContentItem().setStringValue(deviceObserverSerialNumber);
}

// Procedure context (TID 1005) is not populated yet.
void dcmHelpersCommon::addProcedureContext(DSRDocument *doc){
  // TODO
  (void) doc; // avoid warning: unused parameter
}

// Subject context is not populated yet.
void dcmHelpersCommon::addSubjectContext(DSRDocument *doc){
  // TODO
  (void) doc; // avoid warning: unused parameter
}

/*
 * Add Image Library entry (TID 4020) for the specified SR document
 * and DcmDataset corresponding to an image to the document.
 *
 * An Image node referencing the dataset's SOP Class/Instance UID is added
 * below the current tree position, and one acquisition-context child per
 * available image attribute is attached under it.  Because every attribute
 * below is optional, the first child that actually gets added must descend
 * (AM_belowCurrent) while all later ones stay on the same level
 * (AM_afterCurrent): `addMode` starts as AM_belowCurrent and is flipped to
 * AM_afterCurrent by whichever optional section fires first.
 * On exit the tree cursor is restored to the image library container level.
 */
void dcmHelpersCommon::addImageLibraryEntry(DSRDocument *doc, DcmDataset *imgDataset){
  DcmElement *element;
  DcmItem *sequenceItem;
  std::string sopClassUID, sopInstanceUID;
  char* elementStr;
  //float* elementFloat;
  OFString elementOFString;

  // NOTE(review): the status of findAndGetElement()/getString() is not
  // checked here; a dataset missing SOP Class/Instance UID would use an
  // indeterminate pointer.  Confirm callers guarantee these attributes.
  imgDataset->findAndGetElement(DCM_SOPClassUID, element);
  element->getString(elementStr);
  sopClassUID = std::string(elementStr);
  imgDataset->findAndGetElement(DCM_SOPInstanceUID, element);
  element->getString(elementStr);
  sopInstanceUID = std::string(elementStr);

  // Image node referencing the input image; children are attached below it.
  doc->getTree().addContentItem(DSRTypes::RT_contains,DSRTypes::VT_Image, DSRTypes::AM_belowCurrent);
  DSRImageReferenceValue imageReference = DSRImageReferenceValue(sopClassUID.c_str(), sopInstanceUID.c_str());
  doc->getTree().getCurrentContentItem().setImageReference(imageReference);

  DSRCodedEntryValue codedValue;
  DSRTypes::E_AddMode addMode = DSRTypes::AM_belowCurrent;

  // Image Laterality
  // NOTE(review): DCM_ImageLaterality has VR CS (a plain value, not a
  // sequence); calling findAndGetSequenceItem()/readSequence() on it looks
  // questionable -- confirm against DCMTK semantics.
  if(imgDataset->findAndGetSequenceItem(DCM_ImageLaterality,sequenceItem).good()){
    codedValue.readSequence(*imgDataset, DCM_ImageLaterality,"2");
    //findAndGetCodedValueFromSequenceItem(sequenceItem, codedValue);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Code, DSRTypes::AM_belowCurrent);
    addMode = DSRTypes::AM_afterCurrent;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111027","DCM","Image Laterality"));
    doc->getTree().getCurrentContentItem().setCodeValue(codedValue);
  }

  // Image View
  if(imgDataset->findAndGetSequenceItem(DCM_ViewCodeSequence,sequenceItem).good()){
    //findAndGetCodedValueFromSequenceItem(sequenceItem,codedValue);
    codedValue.readSequence(*imgDataset, DCM_ViewCodeSequence, "2");
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Code, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111031","DCM","Image View"));
    doc->getTree().getCurrentContentItem().setCodeValue(codedValue);
    //if(imgDataset->findAndGetSequenceItem(DCM_ViewModifierCodeSequence,sequenceItem).good()){
    if(codedValue.readSequence(*imgDataset, DCM_ViewModifierCodeSequence, "2").good()){
      //findAndGetCodedValueFromSequenceItem(sequenceItem,codedValue);
      // View modifier is a concept-modifier child of the Image View item,
      // hence the extra descend + goUp() around it.
      doc->getTree().addContentItem(DSRTypes::RT_hasConceptMod, DSRTypes::VT_Code, DSRTypes::AM_belowCurrent);
      doc->getTree().getCurrentContentItem().setConceptName(
        DSRCodedEntryValue("111032","DCM","Image View Modifier"));
      doc->getTree().getCurrentContentItem().setCodeValue(codedValue);
      doc->getTree().goUp();
    }
  }

  // Patient Orientation - Row and Column separately
  if(imgDataset->findAndGetElement(DCM_PatientOrientation, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Text, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111044","DCM","Patient Orientation Row"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
    element->getOFString(elementOFString, 1);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Text, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111043","DCM","Patient Orientation Column"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
  }

  // Study date
  if(imgDataset->findAndGetElement(DCM_StudyDate, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Date, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111060","DCM","Study Date"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
  }

  // Study time
  if(imgDataset->findAndGetElement(DCM_StudyTime, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Time, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111061","DCM","Study Time"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
  }

  // Content date
  if(imgDataset->findAndGetElement(DCM_ContentDate, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Date, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111018","DCM","Content Date"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
  }

  // Content time
  if(imgDataset->findAndGetElement(DCM_ContentTime, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Time, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111019","DCM","Content Time"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString.c_str());
  }

  // Pixel Spacing - horizontal and vertical separately
  if(imgDataset->findAndGetElement(DCM_PixelSpacing, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111026","DCM","Horizontal Pixel Spacing"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
    element->getOFString(elementOFString, 1);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("111066","DCM","Vertical Pixel Spacing"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
  }

  // Positioner Primary Angle
  if(imgDataset->findAndGetElement(DCM_PositionerPrimaryAngle, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("112011","DCM","Positioner Primary Angle"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("deg","UCUM","degrees of plane angle")));
  }

  // Positioner Secondary Angle
  if(imgDataset->findAndGetElement(DCM_PositionerSecondaryAngle, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("112012","DCM","Positioner Secondary Angle"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("deg","UCUM","degrees of plane angle")));
  }

  // TODO
  // Spacing between slices: May be computed from the Image Position (Patient) (0020,0032)
  // projected onto the normal to the Image Orientation (Patient) (0020,0037) if present;
  // may or may not be the same as the Spacing Between Slices (0018,0088) if present.

  // Slice thickness/
  if(imgDataset->findAndGetElement(DCM_SliceThickness, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("112225","DCM","Slice Thickness"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
  }

  // Frame of reference
  if(imgDataset->findAndGetElement(DCM_FrameOfReferenceUID, element).good()){
    element->getOFString(elementOFString,0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_UIDRef, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("112227","DCM","Frame of Reference UID"));
    doc->getTree().getCurrentContentItem().setStringValue(elementOFString);
  }

  // Image Position Patient - one NUM item per component (X, Y, Z)
  if(imgDataset->findAndGetElement(DCM_ImagePositionPatient, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110901","DCM","Image Position (Patient) X"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
    element->getOFString(elementOFString, 1);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110902","DCM","Image Position (Patient) Y"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
    element->getOFString(elementOFString, 2);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110903","DCM","Image Position (Patient) Z"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("mm","UCUM","millimeter")));
  }

  // Image Orientation Patient - one NUM item per direction-cosine component
  if(imgDataset->findAndGetElement(DCM_ImageOrientationPatient, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110904","DCM","Image Orientation (Patient) Row X"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
    element->getOFString(elementOFString, 1);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110905","DCM","Image Orientation (Patient) Row Y"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
    element->getOFString(elementOFString, 2);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110906","DCM","Image Orientation (Patient) Row Z"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
    element->getOFString(elementOFString, 3);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110907","DCM","Image Orientation (Patient) Column X"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
    element->getOFString(elementOFString, 4);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110908","DCM","Image Orientation (Patient) Column Y"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
    element->getOFString(elementOFString, 5);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110909","DCM","Image Orientation (Patient) Column Z"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{-1:1}","UCUM","{-1:1}")));
  }

  // Pixel data dimensions (Rows/Columns); Columns is only recorded when
  // Rows is present.
  if(imgDataset->findAndGetElement(DCM_Rows, element).good()){
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, addMode);
    addMode = addMode == DSRTypes::AM_belowCurrent ? DSRTypes::AM_afterCurrent : addMode;
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110910","DCM","Pixel Data Rows"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{pixels}","UCUM","pixels")));
    imgDataset->findAndGetElement(DCM_Columns, element);
    element->getOFString(elementOFString, 0);
    doc->getTree().addContentItem(DSRTypes::RT_hasAcqContext, DSRTypes::VT_Num, DSRTypes::AM_afterCurrent);
    doc->getTree().getCurrentContentItem().setConceptName(
      DSRCodedEntryValue("110911","DCM","Pixel Data Columns"));
    doc->getTree().getCurrentContentItem().setNumericValue(
      DSRNumericMeasurementValue(elementOFString.c_str(), DSRCodedEntryValue("{pixels}","UCUM","pixels")));
  }

  doc->getTree().goUp(); // up to image level
  doc->getTree().goUp(); // up to image library container level
}
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/dcmUnitsConversionHelper.h
.h
527
15
/*----------------------------------------------------------------------
  Helper functions for converting units in SUVFactorCalculator.cxx
  -----------------------------------------------------------------------*/
// NOTE(review): identifiers starting with a double underscore are reserved
// to the implementation; consider renaming this include guard.
#ifndef __dcmUnitsConversionHelper_h
#define __dcmUnitsConversionHelper_h

// Convert a colon-separated time string "HH:MM:SS[.frac]" to a count of
// seconds.  Returns a negative value when the input cannot be parsed.
double ConvertTimeToSeconds(const char *time );

// Convert <count> between weight units ("kg", "g", "lb").
// Returns -1.0 when either unit string is NULL; an unrecognized unit
// string leaves <count> unchanged.
double ConvertWeightUnits(double count, const char *fromunits, const char *tounits );

// Convert <count> between radioactivity units ("MBq", "kBq", "Bq", "mBq",
// "uBq", "MCi", "kCi", "Ci", "mCi", "uCi").
// Returns -1.0 when either unit string is NULL; an unrecognized unit
// string leaves <count> unchanged.
double ConvertRadioactivityUnits(double count, const char *fromunits, const char *tounits );

#endif
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/dcmUnitsConversionHelper.cxx
.cxx
13,871
614
#include "dcmUnitsConversionHelper.h"

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>

// ...............................................................................................
// Internal lookup helpers (file scope).
// ...............................................................................................

// Return the mass of one <units> expressed in grams, or -1.0 for an
// unrecognized unit string.  The pound factor intentionally keeps the
// original code's approximate 2.2 lb/kg conversion so existing results
// are preserved (the exact factor would be 2.20462 lb/kg).
static double weightUnitInGrams(const char *units)
{
  if( !strcmp(units, "kg") ) { return 1000.0; }
  if( !strcmp(units, "g")  ) { return 1.0; }
  if( !strcmp(units, "lb") ) { return 1000.0 / 2.2; }
  return -1.0;
}

// Return the activity of one <units> expressed in becquerels, or -1.0 for
// an unrecognized unit string.  Based on the definition 1 Ci = 3.7e10 Bq.
static double radioactivityUnitInBq(const char *units)
{
  if( !strcmp(units, "MBq") ) { return 1.0e6; }
  if( !strcmp(units, "kBq") ) { return 1.0e3; }
  if( !strcmp(units, "Bq")  ) { return 1.0; }
  if( !strcmp(units, "mBq") ) { return 1.0e-3; }
  if( !strcmp(units, "uBq") ) { return 1.0e-6; }
  if( !strcmp(units, "MCi") ) { return 3.7e16; }
  if( !strcmp(units, "kCi") ) { return 3.7e13; }
  if( !strcmp(units, "Ci")  ) { return 3.7e10; }
  if( !strcmp(units, "mCi") ) { return 3.7e7; }
  if( !strcmp(units, "uCi") ) { return 3.7e4; }
  return -1.0;
}

// ...............................................................................................
// ...

// Convert a colon-separated time string "HH:MM:SS[.frac]" to seconds.
// Returns -1.0 on a NULL or malformed input string (the previous version
// contained a duplicate, unreachable NULL check and would throw
// std::out_of_range on strings shorter than "HH:MM:SS").
double ConvertTimeToSeconds(const char *time )
{
  if( time == NULL )
    {
    std::cerr << "ConvertTimeToSeconds got a NULL time string." << std::endl;
    return -1.0;
    }

  const std::string timeStr = time;
  if( timeStr.length() < 8 || timeStr[2] != ':' || timeStr[5] != ':' )
    {
    std::cerr << "ConvertTimeToSeconds got a malformed time string: "
              << timeStr << std::endl;
    return -1.0;
    }

  // Fixed-position fields, same layout the original parser assumed.
  const double hours   = atof( timeStr.substr(0, 2).c_str() );
  const double minutes = atof( timeStr.substr(3, 2).c_str() );
  const double seconds = atof( timeStr.substr(6).c_str() );

  return seconds + (60.0 * minutes) + (3600.0 * hours);
}

// ...............................................................................................
// ...

// Convert <count> from weight units <fromunits> to <tounits>.
// Supported unit strings: "kg", "g", "lb".
// Returns -1.0 when either unit string is NULL; an unrecognized unit
// string leaves <count> unchanged (original behavior).
double ConvertWeightUnits(double count, const char *fromunits, const char *tounits )
{
  if( fromunits == NULL )
    {
    std::cout << "Got NULL parameter fromunits. A bad param was probably specified." << std::endl;
    return -1.0;
    }
  if( tounits == NULL )
    {
    std::cout << "Got NULL parameter from tounits. A bad parameter was probably specified." << std::endl;
    return -1.0;
    }

  const double fromGrams = weightUnitInGrams(fromunits);
  const double toGrams   = weightUnitInGrams(tounits);
  if( fromGrams < 0.0 || toGrams < 0.0 )
    {
    return count;
    }
  return count * (fromGrams / toGrams);
}

// ...............................................................................................
// ...

// Convert <count> from radioactivity units <fromunits> to <tounits>.
// Supported unit strings: "MBq", "kBq", "Bq", "mBq", "uBq",
// "MCi", "kCi", "Ci", "mCi", "uCi".
// Returns -1.0 when either unit string is NULL; an unrecognized unit
// string leaves <count> unchanged.
//
// Fixes two bugs in the previous branch-per-pair implementation: the
// target units "uBq" and the source units "uCi" were compared against
// " uBq"/" uCi" (with a leading space) and therefore silently fell
// through without converting.  All pairwise factors are now derived from
// a single unit->Bq table, which also removes the truncated hand-typed
// constants (e.g. 37000000000000000000848.0 for MCi->uBq).
double ConvertRadioactivityUnits(double count, const char *fromunits, const char *tounits )
{
  if( fromunits == NULL )
    {
    std::cout << "Got NULL parameter in fromunits. A bad parameter was probably specified." << std::endl;
    return -1.0;
    }
  if( tounits == NULL )
    {
    std::cout << "Got NULL parameter in tounits. A bad parameter was probably specified." << std::endl;
    return -1.0;
    }

  const double fromBq = radioactivityUnitInBq(fromunits);
  const double toBq   = radioactivityUnitInBq(tounits);
  if( fromBq < 0.0 || toBq < 0.0 )
    {
    return count;
    }
  return count * (fromBq / toBq);
}
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/SUVFactorCalculator.cxx
.cxx
46,327
1,193
#include "SUVFactorCalculatorCLP.h" // VTK includes #include <vtkGlobFileNames.h> #include <vtksys/Directory.hxx> // ITK includes #include <itkGDCMSeriesFileNames.h> #include <itkImageSeriesReader.h> #include <itkImageFileReader.h> #include <itkNrrdImageIO.h> #include <itkImageFileWriter.h> #include <itkImageRegionConstIterator.h> #include <itkImageDuplicator.h> #include <itkMinimumMaximumImageCalculator.h> #include <itkShiftScaleImageFilter.h> #include "itkGDCMImageIO.h" #include "itkNumericTraits.h" #undef HAVE_SSTREAM #include "itkDCMTKFileReader.h" #include <iostream> #include <sstream> #include <math.h> // DCMTK includes #include "dcmtk/config/osconfig.h" /* make sure OS specific configuration is included first */ #include "dcmtk/ofstd/ofstream.h" #include "dcmtk/dcmdata/dcuid.h" #include "dcmtk/dcmdata/dcfilefo.h" #include "dcmtk/dcmsr/dsriodcc.h" #include "dcmtk/dcmsr/dsrdoc.h" #include "dcmtk/dcmdata/dcdeftag.h" // versioning info #include "vtkSUVFactorCalculatorVersionConfigure.h" // helpers #include "dcmHelpersCommon.h" #include "dcmUnitsConversionHelper.h" // ... // ............................................................................................... // ... /* SOME NOTES on SUV and parameters of interest: This is the first-pass implementation we'll make: Standardized uptake value, SUV, (also referred to as the dose uptake ratio, DUR) is a widely used, simple PET quantifier, calculated as a ratio of tissue radioactivity concentration (e.g. in units kBq/ml) at time T, CPET(T) and injected dose (e.g. in units MBq) at the time of injection divided by body weight (e.g. in units kg). SUVbw = CPET(T) / (Injected dose / Patient's weight) Instead of body weight, the injected dose may also be corrected by the lean body mass, or body surface area (BSA) (Kim et al., 1994). Verbraecken et al. (2006) review the different formulas for calculating the BSA. 
SUVbsa= CPET(T) / (Injected dose / BSA) If the above mentioned units are used, the unit of SUV will be g/ml. === Later, we can try a more careful computation that includes decay correction: Most PET systems record their pixels in units of activity concentration (MBq/ml) (once Rescale Slope has been applied, and the units are specified in the Units attribute). To compute SUVbw, for example, it is necessary to apply the decay formula and account for the patient's weight. For that to be possible, during de-identification, the Patient's Weight must not have been removed, and even though dates may have been removed, it is important not to remove the times, since the difference between the time of injection and the acquisition time is what is important. In particular, DO NOT REMOVE THE FOLLOWING DICOM TAGS: * Radiopharmaceutical Start Time (0018,1072) * Decay Correction (0054,1102) * Decay Factor (0054,1321) * Frame Reference Time (0054,1300) * Radionuclide Half Life (0018,1075) * Series Time (0008,0031) * Patient's Weight (0010,1030) Note that to calculate other common SUV values like SUVlbm and SUVbsa, you also need to retain: * Patient's Sex (0010,0040) * Patient's Size (0010,1020) If there is a strong need to remove times from an identity leakage perspective, then one can normalize all times to some epoch, but it has to be done consistently across all images in the entire study (preferably including the CT reference images); note though, that the time of injection may be EARLIER than the Study Time, which you might assume would be the earliest, so it takes a lot of effort to get this right. For Philips images, none of this applies, and the images are in COUNTS and the private tag (7053,1000) SUV Factor must be used. To calculate the SUV of a particular pixel, you just have to calculate [pixel _value * tag 7053|1000 ] The tag 7053|1000 is a number (double) taking into account tn ihe patient's weight, the injection quantity. 
We get the tag from the original file with:
  double suv;
  itk::ExposeMetaData<double>(*dictionary[i], "7053|1000", suv);
*/
// ...
// ...............................................................................................
// ...
// Use an anonymous namespace to keep class types and function names
// from colliding when module is used as shared object module. Every
// thing should be in an anonymous namespace except for the module
// entry point, e.g. main()
//
namespace
{

using OutputVolumeType = itk::Image<float, 3>;

// Aggregates everything read from the PET DICOM header plus the computed
// SUV conversion factors; passed by reference through the processing steps.
struct parameters
  {
  std::string PETDICOMPath;            // directory containing the PET series
  std::string PETSeriesInstanceUID;    // empty means "use the first series found"
  std::string patientName;
  std::string studyDate;
  std::string radioactivityUnits;
  std::string weightUnits;
  std::string heightUnits;
  std::string volumeUnits;
  double injectedDose;
  //double calibrationFactor;
  double patientWeight;
  double patientHeight; //in meters
  std::string patientSex;
  std::string seriesReferenceTime;
  std::string injectionTime;           // "HH:MM:SS" as built from the DICOM TM value
  std::string decayCorrection;
  std::string decayFactor;
  std::string radionuclideHalfLife;    // seconds, kept as string until parsed
  std::string frameReferenceTime;
  std::string returnParameterFile;
  std::string correctedImage;
  double SUVbwConversionFactor;
  double SUVlbmConversionFactor;
  double SUVbsaConversionFactor;
  double SUVibwConversionFactor;
  bool outputVolumeRequested;
  typename OutputVolumeType::Pointer unnormalizedVolume;
  std::vector< std::string > PETFilenames;
  std::string RWVMFile;
  short maxPixelValue;
  std::string seriesDescription;
  std::string seriesNumber;
  std::string instanceNumber;
  };

// ...
// ...............................................................................................
// ...
// Decay-correct the injected dose from injection time to the series
// reference time using the radionuclide half-life:
//   decayedDose = injectedDose * 2^(-(decayTime / halfLife))
// NOTE(review): no guard for a zero/unparsable half-life or for
// ConvertTimeToSeconds() failure (-1.0) -- confirm upstream validation.
double DecayCorrection(parameters & list, double injectedDose )
{
  double scanTimeSeconds = ConvertTimeToSeconds(list.seriesReferenceTime.c_str() );
  //double scanTimeSeconds = ConvertTimeToSeconds(list.seriesTime.c_str());
  double startTimeSeconds = ConvertTimeToSeconds( list.injectionTime.c_str() );

  std::cout << " RAD. START TIME: " << startTimeSeconds << std::endl;
  std::cout << " SERIES TIME: " << scanTimeSeconds << std::endl;

  //double startTimeSeconds = ConvertTimeToSeconds( list.radiopharmStartTime.c_str());
  double halfLife = atof( list.radionuclideHalfLife.c_str() );
  double decayTime = scanTimeSeconds - startTimeSeconds;
  double decayedDose = injectedDose * (double)pow(2.0, -(decayTime / halfLife) );
  std::cout << " DECAYED DOSE: " << decayedDose << std::endl;
  return decayedDose;
}

// ...
// ...............................................................................................
// ...
// Scale <image> by <normalizationFactor> (shift 0) and write it to
// <filename>.  Returns false (after printing the ITK exception) on any
// write/filter failure, true on success.
bool WriteNormalizedImage(OutputVolumeType::Pointer image, std::string filename, double normalizationFactor, bool useCompression=false)
{
  std::cout << "Writing normalized image " << filename << std::endl;
  try
    {
    using NormalizationFilterType = itk::ShiftScaleImageFilter<OutputVolumeType, OutputVolumeType> ;
    auto normalize = NormalizationFilterType::New();
    normalize->SetShift(0.0);
    normalize->SetScale(normalizationFactor);
    normalize->SetInput(image);

    using WriterType = itk::ImageFileWriter<OutputVolumeType>;
    auto writer = WriterType::New();
    writer->SetInput( normalize->GetOutput() );
    writer->SetFileName( filename );
    writer->SetUseCompression(useCompression);
    writer->Update();
    }
  catch (itk::ExceptionObject &ex)
    {
    std::cout << ex << std::endl;
    return false;
    }
  return true;
}

// ...
// ...............................................................................................
// ...
int LoadImagesAndComputeSUV( parameters & list )
{
  typedef short PixelValueType;
  typedef itk::Image< PixelValueType, 3 > VolumeType;
  typedef itk::ImageSeriesReader< VolumeType > VolumeReaderType;

  // read the DICOM dir to get the radiological data
  typedef itk::GDCMSeriesFileNames InputNamesGeneratorType;

  if ( !list.PETDICOMPath.compare(""))
    {
    std::cerr << "GetParametersFromDicomHeader:Got empty list.PETDICOMPath."
<< std::endl; return EXIT_FAILURE; } //--- catch non-dicom data vtkGlobFileNames* gfn = vtkGlobFileNames::New(); gfn->SetDirectory(list.PETDICOMPath.c_str()); gfn->AddFileNames("*.nhdr"); gfn->AddFileNames("*.nrrd"); gfn->AddFileNames("*.hdr"); gfn->AddFileNames("*.mha"); gfn->AddFileNames("*.img"); gfn->AddFileNames("*.nii"); gfn->AddFileNames("*.nia"); int notDICOM = 0; int nFiles = gfn->GetNumberOfFileNames(); if (nFiles > 0) { notDICOM = 1; } gfn->Delete(); if ( notDICOM ) { std::cerr << "PET Dicom parameter doesn't point to a dicom directory!" << std::endl; return EXIT_FAILURE; } InputNamesGeneratorType::Pointer inputNames = InputNamesGeneratorType::New(); inputNames->SetUseSeriesDetails(false); // Series details not necessary to distinguish between different PET scans inputNames->SetDirectory(list.PETDICOMPath); itk::SerieUIDContainer seriesUIDs = inputNames->GetSeriesUIDs(); std::string selectedSeriesUID = list.PETSeriesInstanceUID.length()>0 ? list.PETSeriesInstanceUID : seriesUIDs[0]; if (std::find(seriesUIDs.begin(), seriesUIDs.end(), selectedSeriesUID) == seriesUIDs.end()) { std::cerr << "Selected series instance UID not found in PET dicom path!" 
<< std::endl; return EXIT_FAILURE; } list.PETFilenames = inputNames->GetFileNames(selectedSeriesUID); typedef short PixelValueType; typedef itk::Image< PixelValueType, 3 > VolumeType; typedef itk::ImageSeriesReader< VolumeType > VolumeReaderType; itk::GDCMImageIO::Pointer dicomIO = itk::GDCMImageIO::New(); const VolumeReaderType::FileNamesContainer & filenames = inputNames->GetFileNames(selectedSeriesUID); VolumeReaderType::Pointer volumeReader = VolumeReaderType::New(); volumeReader->SetImageIO( dicomIO ); volumeReader->SetFileNames( filenames ); try{ volumeReader->Update(); } catch (itk::ExceptionObject &ex) { std::cout << ex << std::endl; return EXIT_FAILURE; } // Determine largest value using MinMaxCalculatorType = itk::MinimumMaximumImageCalculator<VolumeType>; auto calc = MinMaxCalculatorType::New(); calc->SetImage(volumeReader->GetOutput()); calc->Compute(); list.maxPixelValue = calc->GetMaximum(); if (list.outputVolumeRequested){ // read image with precision of output volume auto reader = itk::ImageSeriesReader< OutputVolumeType >::New(); reader->SetImageIO( dicomIO ); reader->SetFileNames( filenames ); reader->Update(); list.unnormalizedVolume = reader->GetOutput(); } std::string tag; std::string yearstr; std::string monthstr; std::string daystr; std::string hourstr; std::string minutestr; std::string secondstr; int len; // Nuclear Medicine DICOM info: /* 0054,0016 Radiopharmaceutical Information Sequence: 0018,1072 Radionuclide Start Time: 090748.000000 0018,1074 Radionuclide Total Dose: 370500000 0018,1075 Radionuclide Half Life: 6586.2 0018,1076 Radionuclide Positron Fraction: 0 */ int parsingDICOM = 0; itk::DCMTKFileReader fileReader; fileReader.SetFileName(filenames[0]); fileReader.LoadFile(); itk::DCMTKSequence seq; if(fileReader.GetElementSQ(0x0054,0x0016,seq,false) == EXIT_SUCCESS) { parsingDICOM = 1; //--- //--- Radiopharmaceutical Start Time seq.GetElementTM(0x0018,0x1072,tag); //--- expect A string of characters of the format hhmmss.frac; 
//---where hh contains hours (range "00" - "23"), mm contains minutes //---(range "00" - "59"), ss contains seconds (range "00" - "59"), and frac //---contains a fractional part of a second as small as 1 millionth of a //---second (range "000000" - "999999"). A 24 hour clock is assumed. //---Midnight can be represented by only "0000" since "2400" would //---violate the hour range. The string may be padded with trailing //---spaces. Leading and embedded spaces are not allowed. One //---or more of the components mm, ss, or frac may be unspecified //---as long as every component to the right of an unspecified //---component is also unspecified. If frac is unspecified the preceding "." //---may not be included. Frac shall be held to six decimal places or //---less to ensure its format conforms to the ANSI //---Examples - //---1. "070907.0705" represents a time of 7 hours, 9 minutes and 7.0705 seconds. //---2. "1010" represents a time of 10 hours, and 10 minutes. //---3. "021" is an invalid value. if ( tag.c_str() == NULL || *(tag.c_str()) == '\0' ) { list.injectionTime = "MODULE_INIT_NO_VALUE" ; } else { len = tag.length(); hourstr.clear(); minutestr.clear(); secondstr.clear(); if ( len >= 2 ) { hourstr = tag.substr(0, 2); } else { hourstr = "00"; } if ( len >= 4 ) { minutestr = tag.substr(2, 2); } else { minutestr = "00"; } if ( len >= 6 ) { secondstr = tag.substr(4); } else { secondstr = "00"; } tag.clear(); tag = hourstr.c_str(); tag += ":"; tag += minutestr.c_str(); tag += ":"; tag += secondstr.c_str(); list.injectionTime = tag.c_str(); } //--- //--- Radionuclide Total Dose if(seq.GetElementDS(0x0018,0x1074,1,&list.injectedDose,false) != EXIT_SUCCESS) { list.injectedDose = 0.0; } //--- //--- RadionuclideHalfLife //--- Expect a Decimal String //--- A string of characters representing either //--- a fixed point number or a floating point number. 
//--- A fixed point number shall contain only the characters 0-9 //--- with an optional leading "+" or "-" and an optional "." to mark //--- the decimal point. A floating point number shall be conveyed //--- as defined in ANSI X3.9, with an "E" or "e" to indicate the start //--- of the exponent. Decimal Strings may be padded with leading //--- or trailing spaces. Embedded spaces are not allowed. if(seq.GetElementDS(0x0018,0x1075,list.radionuclideHalfLife,false) != EXIT_SUCCESS) { list.radionuclideHalfLife = "MODULE_INIT_NO_VALUE"; } //--- //---Radionuclide Positron Fraction //--- not currently using this one? std::string radioNuclidePositronFraction; if(seq.GetElementDS(0x0018,0x1075,radioNuclidePositronFraction,false) != EXIT_SUCCESS) { radioNuclidePositronFraction = "MODULE_INIT_NO_VALUE"; } //-- //--- UNITS: something like BQML: //--- CNTS, NONE, CM2, PCNT, CPS, BQML, //--- MGMINML, UMOLMINML, MLMING, MLG, //--- 1CM, UMOLML, PROPCNTS, PROPCPS, //--- MLMINML, MLML, GML, STDDEV //--- if(fileReader.GetElementCS(0x0054,0x1001,tag,false) == EXIT_SUCCESS) { //--- I think these are piled together. MBq ml... search for all. 
std::string units = tag.c_str(); if ( ( units.find ("BQML") != std::string::npos) || ( units.find ("BQML") != std::string::npos) ) { list.radioactivityUnits= "Bq"; } else if ( ( units.find ("MBq") != std::string::npos) || ( units.find ("MBQ") != std::string::npos) ) { list.radioactivityUnits = "MBq"; } else if ( (units.find ("kBq") != std::string::npos) || (units.find ("kBQ") != std::string::npos) || (units.find ("KBQ") != std::string::npos) ) { list.radioactivityUnits = "kBq"; } else if ( (units.find ("mBq") != std::string::npos) || (units.find ("mBQ") != std::string::npos) ) { list.radioactivityUnits = "mBq"; } else if ( (units.find ("uBq") != std::string::npos) || (units.find ("uBQ") != std::string::npos) ) { list.radioactivityUnits = "uBq"; } else if ( (units.find ("Bq") != std::string::npos) || (units.find ("BQ") != std::string::npos) ) { list.radioactivityUnits = "Bq"; } else if ( (units.find ("MCi") != std::string::npos) || ( units.find ("MCI") != std::string::npos) ) { list.radioactivityUnits = "MCi"; } else if ( (units.find ("kCi") != std::string::npos) || (units.find ("kCI") != std::string::npos) || (units.find ("KCI") != std::string::npos) ) { list.radioactivityUnits = "kCi"; } else if ( (units.find ("mCi") != std::string::npos) || (units.find ("mCI") != std::string::npos) ) { list.radioactivityUnits = "mCi"; } else if ( (units.find ("uCi") != std::string::npos) || (units.find ("uCI") != std::string::npos) ) { list.radioactivityUnits = "uCi"; } else if ( (units.find ("Ci") != std::string::npos) || (units.find ("CI") != std::string::npos) ) { list.radioactivityUnits = "Ci"; } list.volumeUnits = "ml"; } else { //--- default values. list.radioactivityUnits = "MBq"; list.volumeUnits = "ml"; } //--- //--- DecayCorrection //--- Possible values are: //--- NONE = no decay correction //--- START= acquisition start time //--- ADMIN = radiopharmaceutical administration time //--- Frame Reference Time is the time that the pixel values in the Image occurred. 
//--- It's defined as the time offset, in msec, from the Series Reference Time. //--- Series Reference Time is defined by the combination of: //--- Series Date (0008,0021) and //--- Series Time (0008,0031). //--- We don't pull these out now, but can if we have to. if(fileReader.GetElementCS(0x0054,0x1102,tag,false) == EXIT_SUCCESS) { //---A string of characters with leading or trailing spaces (20H) being non-significant. list.decayCorrection = tag.c_str(); } else { list.decayCorrection = "MODULE_INIT_NO_VALUE"; } //--- //--- StudyDate if(fileReader.GetElementDA(0x0008,0x0021,tag,false) == EXIT_SUCCESS) { //--- YYYYMMDD yearstr.clear(); daystr.clear(); monthstr.clear(); len = tag.length(); if ( len >= 4 ) { yearstr = tag.substr(0, 4); } else { yearstr = "????"; } if ( len >= 6 ) { monthstr = tag.substr(4, 2); } else { monthstr = "??"; } if ( len >= 8 ) { daystr = tag.substr (6, 2); } else { daystr = "??"; } tag.clear(); tag = yearstr.c_str(); tag += "/"; tag += monthstr.c_str(); tag += "/"; tag += daystr.c_str(); list.studyDate = tag.c_str(); } else { list.studyDate = "MODULE_INIT_NO_VALUE"; } //--- //--- PatientName if(fileReader.GetElementPN(0x0010,0x0010,tag,false) == EXIT_SUCCESS) { list.patientName = tag.c_str(); } else { list.patientName = "MODULE_INIT_NO_VALUE"; } //--- //--- DecayFactor if(fileReader.GetElementDS(0x0054,0x1321,tag,false) == EXIT_SUCCESS) { //--- have to parse this out. what we have is //---A string of characters representing either a fixed point number or a //--- floating point number. A fixed point number shall contain only the //---characters 0-9 with an optional leading "+" or "-" and an optional "." //---to mark the decimal point. A floating point number shall be conveyed //---as defined in ANSI X3.9, with an "E" or "e" to indicate the start of the //---exponent. Decimal Strings may be padded with leading or trailing spaces. //---Embedded spaces are not allowed. or maybe atof does it already... 
list.decayFactor = tag.c_str() ; } else { list.decayFactor = "MODULE_INIT_NO_VALUE" ; } //--- //--- FrameReferenceTime if(fileReader.GetElementDS(0x0054,0x1300,tag,false) == EXIT_SUCCESS) { //--- The time that the pixel values in the image //--- occurred. Frame Reference Time is the //--- offset, in msec, from the Series reference //--- time. list.frameReferenceTime = tag.c_str(); } else { list.frameReferenceTime = "MODULE_INIT_NO_VALUE"; } //--- //--- SeriesTime if(fileReader.GetElementTM(0x0008,0x0031,tag,false) == EXIT_SUCCESS) { hourstr.clear(); minutestr.clear(); secondstr.clear(); len = tag.length(); if ( len >= 2 ) { hourstr = tag.substr(0, 2); } else { hourstr = "00"; } if ( len >= 4 ) { minutestr = tag.substr(2, 2); } else { minutestr = "00"; } if ( len >= 6 ) { secondstr = tag.substr(4); } else { secondstr = "00"; } tag.clear(); tag = hourstr.c_str(); tag += ":"; tag += minutestr.c_str(); tag += ":"; tag += secondstr.c_str(); list.seriesReferenceTime = tag.c_str(); } else { list.seriesReferenceTime = "MODULE_INIT_NO_VALUE"; } //--- //--- PatientWeight if(fileReader.GetElementDS(0x0010,0x1030,1,&list.patientWeight,false) == EXIT_SUCCESS) { //--- Expect same format as RadionuclideHalfLife list.weightUnits = "kg"; } else { list.patientWeight = 0.0; list.weightUnits = ""; } //--- //--- PatientSize if(fileReader.GetElementDS(0x0010,0x1020,1,&list.patientHeight,false) == EXIT_SUCCESS) { //--- Assumed to be in meters? 
list.heightUnits = "m"; } else { list.patientHeight = 0.0; list.heightUnits = "MODULE_INIT_NO_VALUE"; } //--- //--- PatientSex if(fileReader.GetElementCS(0x0010,0x0040,tag,false) == EXIT_SUCCESS) { list.patientSex = tag.c_str(); if(list.patientSex!="M" && list.patientSex!="F") { std::cout << "Warning: sex is not M or F but rather \"" << list.patientSex.c_str() << "\"" << std::endl; } } else { list.patientSex = "MODULE_INIT_NO_VALUE"; } //--- //--- CorrectedImage std::string correctedImage; if(fileReader.GetElementCS(0x0028,0x0051,correctedImage,false) == EXIT_SUCCESS) { list.correctedImage = correctedImage; } else { std::cout << "No corrected image detected." << std::endl; } /*//--- //--- CalibrationFactor if(fileReader.GetElementDS(0x7053,0x1009,1, &list.calibrationFactor,false) != EXIT_SUCCESS) { list.calibrationFactor = 0.0 ; }*/ } // check.... did we get all params we need for computation? if ( (parsingDICOM) && (list.injectedDose != 0.0) && (list.patientWeight != 0.0) && (list.seriesReferenceTime.compare("MODULE_INIT_NO_VALUE") != 0) && (list.injectionTime.compare("MODULE_INIT_NO_VALUE") != 0) && (list.radionuclideHalfLife.compare("MODULE_INIT_NO_VALUE") != 0) ) { std::cout << "Input parameters okay..." << std::endl; } else { std::cerr << "Missing some parameters..." << std::endl; return EXIT_FAILURE; } // convert from input units. if( list.radioactivityUnits.c_str() == NULL ) { std::cerr << "ComputeSUV: Got NULL radioactivity units. No computation done." << std::endl; return EXIT_FAILURE; } if( list.weightUnits.c_str() == NULL ) { std::cerr << "ComputeSUV: Got NULL weight units. No computation could be done." 
<< std::endl; return EXIT_FAILURE; } list.SUVbwConversionFactor = 0.0; list.SUVlbmConversionFactor = 0.0; list.SUVbsaConversionFactor = 0.0; list.SUVibwConversionFactor = 0.0; if(list.correctedImage.compare("MODULE_INIT_NO_VALUE") != 0) { std::string correctedImage = list.correctedImage; if(correctedImage.find("ATTN")!=std::string::npos && (correctedImage.find("DECAY")!=std::string::npos || correctedImage.find("DECY")!=std::string::npos)) { std::cout << "ATTN/DECAY correction detected." << std::endl; if(list.decayCorrection=="START") { std::cout << "Decay correction START detected." << std::endl; std::string halfLife = list.radionuclideHalfLife; double weight = list.patientWeight; double height = list.patientHeight*100; //convert to centimeters double dose = list.injectedDose; std::cout << " INJECTED DOSE: " << list.injectedDose << std::endl; if( dose == 0.0 ) { std::cerr << "ComputeSUV: Got NULL dose!" << std::endl; return EXIT_FAILURE; } if( weight == 0.0 ) { std::cerr << "ComputeSUV: got zero weight!" << std::endl; return EXIT_FAILURE; } dose = ConvertRadioactivityUnits( dose, list.radioactivityUnits.c_str(), "kBq"); // kBq/mL double decayedDose = DecayCorrection(list, dose); weight = ConvertWeightUnits( weight, list.weightUnits.c_str(), "kg"); if( decayedDose == 0.0 ) { // oops, weight by dose is infinity. give error std::cerr << "ComputeSUV: Got 0.0 decayed dose!" 
<< std::endl; return EXIT_FAILURE; } else { //All values okay; perform calculation list.SUVbwConversionFactor = weight / decayedDose; if(height != 0.0) { double leanBodyMass; // kg double bodySurfaceArea; // m^2 double idealBodyMass; // kg bodySurfaceArea = (pow(weight,0.425)*pow(height,0.725)*0.007184); list.SUVbsaConversionFactor = bodySurfaceArea / decayedDose; if(list.patientSex=="M") { //leanBodyMass = 1.10*weight - 120*(weight/height)*(weight/height); leanBodyMass = 1.10*weight - 128*(weight/height)*(weight/height); //TODO verify this formula list.SUVlbmConversionFactor = leanBodyMass / decayedDose; idealBodyMass = 48.0 + 1.06*(height - 152); if(idealBodyMass > weight){ idealBodyMass = weight; }; list.SUVibwConversionFactor = idealBodyMass / decayedDose; } if(list.patientSex=="F") { leanBodyMass = 1.07*weight - 148*(weight/height)*(weight/height); list.SUVlbmConversionFactor = leanBodyMass / decayedDose; idealBodyMass = 45.5 + 0.91*(height - 152); if(idealBodyMass > weight){ idealBodyMass = weight; }; list.SUVibwConversionFactor = idealBodyMass / decayedDose; } } else { std::cout << "Warning: No patient height detected. Cannot determine SUVbsa, SUVlbm, and SUVibw conversion factors." << std::endl; } } } else { std::cout << "Decay correction is not START." << std::endl; return EXIT_FAILURE; } } else { std::cout << "No attenuation/decay correction detected." << std::endl; return EXIT_FAILURE; } } else { std::cout << "No corrected image detected." 
<< std::endl; return EXIT_FAILURE; } return EXIT_SUCCESS; } } // end of anonymous namespace void InsertCodeSequence(DcmItem* item, const DcmTag tag, const DSRCodedEntryValue entry, int itemNum=0){ DcmItem *codeSequenceItem; item->findOrCreateSequenceItem(tag, codeSequenceItem, itemNum); codeSequenceItem->putAndInsertString(DCM_CodeValue, entry.getCodeValue().c_str()); codeSequenceItem->putAndInsertString(DCM_CodeMeaning, entry.getCodeMeaning().c_str()); codeSequenceItem->putAndInsertString(DCM_CodingSchemeDesignator, entry.getCodingSchemeDesignator().c_str()); } bool ExportRWV(parameters & list, std::vector<DSRCodedEntryValue> measurementUnitsList, std::vector<std::string> measurementsList, std::string outputDir, std::string outputFileName = ""){ unsigned int numFiles = list.PETFilenames.size(); std::cout << numFiles << " files total" << std::endl; DcmFileFormat fileFormat; DcmDataset* petDataset = NULL; std::vector<OFString> instanceUIDs; for(unsigned int i=0;i<numFiles;i++){ if(fileFormat.loadFile(list.PETFilenames[i].c_str()).bad()){ continue; } petDataset = fileFormat.getAndRemoveDataset(); OFString modality, instanceUID, classUID; petDataset->findAndGetOFString(DCM_Modality, modality); if(std::string("PT") != modality.c_str()){ continue; } petDataset->findAndGetOFString(DCM_SOPInstanceUID, instanceUID); instanceUIDs.push_back(instanceUID); } DcmFileFormat rwvmFileFormat; DcmDataset* rwvDataset = rwvmFileFormat.getDataset(); dcmHelpersCommon::copyPatientModule(petDataset, rwvDataset); dcmHelpersCommon::copyClinicalTrialSubjectModule(petDataset, rwvDataset); dcmHelpersCommon::copyGeneralStudyModule(petDataset, rwvDataset); dcmHelpersCommon::copyPatientStudyModule(petDataset, rwvDataset); char uid[128]; // Series Module dcmGenerateUniqueIdentifier(uid); rwvDataset->putAndInsertString(DCM_Modality,"RWV"); rwvDataset->putAndInsertString(DCM_SeriesInstanceUID, uid); rwvDataset->putAndInsertString(DCM_SeriesNumber,list.seriesNumber.c_str()); 
rwvDataset->putAndInsertString(DCM_InstanceNumber,list.instanceNumber.c_str()); // SOP Common Module dcmGenerateUniqueIdentifier(uid); rwvDataset->putAndInsertString(DCM_SOPInstanceUID, uid); if(rwvDataset->putAndInsertString(DCM_SOPClassUID, UID_RealWorldValueMappingStorage).bad()){ std::cout << "Failed to set class Uid" << std::endl; } // Referenced Series Sequence DcmItem *referencedInstanceSeq; OFString petSeriesInstanceUID; petDataset->findAndGetOFString(DCM_SeriesInstanceUID, petSeriesInstanceUID); rwvDataset->findOrCreateSequenceItem(DCM_ReferencedSeriesSequence, referencedInstanceSeq); referencedInstanceSeq->putAndInsertString(DCM_SeriesInstanceUID, petSeriesInstanceUID.c_str()); for(unsigned int imageId=0;imageId<instanceUIDs.size();imageId++){ DcmItem* referencedSOPItem; referencedInstanceSeq->findOrCreateSequenceItem(DCM_ReferencedInstanceSequence, referencedSOPItem, imageId); referencedSOPItem->putAndInsertString(DCM_ReferencedSOPClassUID, UID_PositronEmissionTomographyImageStorage); referencedSOPItem->putAndInsertString(DCM_ReferencedSOPInstanceUID, instanceUIDs[imageId].c_str()); } // RWV Mapping Module OFString contentDate, contentTime; DcmDate::getCurrentDate(contentDate); DcmTime::getCurrentTime(contentTime); rwvDataset->putAndInsertString(DCM_ContentDate, contentDate.c_str()); rwvDataset->putAndInsertString(DCM_ContentTime, contentTime.c_str()); rwvDataset->putAndInsertString(DCM_SeriesDate, contentDate.c_str()); rwvDataset->putAndInsertString(DCM_SeriesTime, contentTime.c_str()); rwvDataset->putAndInsertString(DCM_SeriesDescription, list.seriesDescription.c_str()); for(unsigned int measurementId=0;measurementId<measurementUnitsList.size();measurementId++){ DcmItem *referencedImageRWVSeqItem, *rwvSeqItem;//, *rwvUnits; rwvDataset->findOrCreateSequenceItem(DCM_ReferencedImageRealWorldValueMappingSequence, referencedImageRWVSeqItem, measurementId); referencedImageRWVSeqItem->findOrCreateSequenceItem(DCM_RealWorldValueMappingSequence, rwvSeqItem); 
rwvSeqItem->putAndInsertString(DCM_LUTExplanation,measurementUnitsList[measurementId].getCodeMeaning().c_str()); rwvSeqItem->putAndInsertString(DCM_LUTLabel,measurementUnitsList[measurementId].getCodeValue().c_str()); rwvSeqItem->putAndInsertSint16(DCM_RealWorldValueFirstValueMapped,0); rwvSeqItem->putAndInsertUint16(DCM_RealWorldValueLastValueMapped,list.maxPixelValue); rwvSeqItem->putAndInsertString(DCM_RealWorldValueIntercept,"0"); rwvSeqItem->putAndInsertString(DCM_RealWorldValueSlope, measurementsList[measurementId].c_str()); DSRCodedEntryValue measurement = measurementUnitsList[measurementId]; InsertCodeSequence(rwvSeqItem, DCM_MeasurementUnitsCodeSequence, DSRCodedEntryValue(measurement.getCodeValue().c_str(), measurement.getCodingSchemeDesignator().c_str(), measurement.getCodeMeaning().c_str())); DcmItem *quantitySeqItem, *measurementMethodSeqItem; if(rwvSeqItem->findOrCreateSequenceItem(DcmTag(0x0040,0x9220, EVR_SQ),quantitySeqItem).bad()){ std::cerr << "Failed to add Quantity Definition sequence" << std::endl; } quantitySeqItem->putAndInsertString(DCM_ValueType,"CODE"); InsertCodeSequence(quantitySeqItem, DCM_ConceptNameCodeSequence, DSRCodedEntryValue("G-C1C6","SRT","Quantity")); InsertCodeSequence(quantitySeqItem, DCM_ConceptCodeSequence, DSRCodedEntryValue("126400","DCM","Standardized Uptake Value")); if(rwvSeqItem->findOrCreateSequenceItem(DcmTag(0x0040,0x9220, EVR_SQ),measurementMethodSeqItem, 1).bad()){ std::cerr << "Failed to add private sequence" << std::endl; } measurementMethodSeqItem->putAndInsertString(DCM_ValueType,"CODE"); if(measurement.getCodeValue() == "{SUVbw}g/ml"){ InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptNameCodeSequence, DSRCodedEntryValue("G-C036","SRT","Measurement Method")); InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptCodeSequence, DSRCodedEntryValue("126410","DCM","SUV body weight calculation method")); } else if(measurement.getCodeValue() == "{SUVlbm}g/ml"){ InsertCodeSequence(measurementMethodSeqItem, 
DCM_ConceptNameCodeSequence, DSRCodedEntryValue("G-C036","SRT","Measurement Method")); InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptCodeSequence, DSRCodedEntryValue("126411","DCM","SUV lean body mass calculation method")); } else if(measurement.getCodeValue() == "{SUVbsa}cm2/ml"){ InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptNameCodeSequence, DSRCodedEntryValue("G-C036","SRT","Measurement Method")); InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptCodeSequence, DSRCodedEntryValue("126412","DCM","SUV body surface area calculation method")); } else if(measurement.getCodeValue() == "{SUVibw}g/ml"){ InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptNameCodeSequence, DSRCodedEntryValue("G-C036","SRT","Measurement Method")); InsertCodeSequence(measurementMethodSeqItem, DCM_ConceptCodeSequence, DSRCodedEntryValue("126413","DCM","SUV ideal body weight calculation method")); }; for(unsigned int imageId=0;imageId<instanceUIDs.size();imageId++){ DcmItem* referencedSOPItem; referencedImageRWVSeqItem->findOrCreateSequenceItem(DCM_ReferencedImageSequence, referencedSOPItem, imageId); referencedSOPItem->putAndInsertString(DCM_ReferencedSOPClassUID, UID_PositronEmissionTomographyImageStorage); referencedSOPItem->putAndInsertString(DCM_ReferencedSOPInstanceUID, instanceUIDs[imageId].c_str()); } } rwvDataset->putAndInsertString(DCM_ContentLabel, "RWV"); rwvDataset->putAndInsertString(DCM_InstanceNumber, "1"); rwvDataset->putAndInsertString(DCM_ContentDescription, "RWV"); rwvDataset->putAndInsertString(DCM_ContentCreatorName, "QIICR"); rwvDataset->putAndInsertString(DCM_Manufacturer, "https://github.com/QIICR/Slicer-SUVFactorCalculator"); rwvDataset->putAndInsertString(DCM_SoftwareVersions, SUVFactorCalculator_WC_REVISION); //CHECK_COND(rwvDataset->putAndInsertString(DCM_BodyPartExamined, "HEADNECK")); if (outputFileName.empty()) outputFileName = outputDir+"/"+uid+".dcm"; list.RWVMFile = outputFileName; std::cout << "saving RWVM to " << 
outputFileName << std::endl; OFCondition cond = rwvmFileFormat.saveFile(outputFileName.c_str(), EXS_LittleEndianExplicit); if(cond.bad()){ std::cout << "Failed to save the result!" << std::endl; std::cout << cond.text() << std::endl; } return true; } // ... // ............................................................................................... // ... int main( int argc, char * argv[] ) { PARSE_ARGS; parameters list; // ... // ... strings used for parsing out DICOM header info // ... std::string yearstr; std::string monthstr; std::string daystr; std::string hourstr; std::string minutestr; std::string secondstr; std::string tag; // convert dicom head to radiopharm data vars list.patientName = "MODULE_INIT_NO_VALUE"; list.studyDate = "MODULE_INIT_NO_VALUE"; list.radioactivityUnits = "MODULE_INIT_NO_VALUE"; list.volumeUnits = "MODULE_INIT_NO_VALUE"; list.injectedDose = 0.0; list.patientWeight = 0.0; list.patientHeight = 0.0; list.patientSex = "MODULE_INIT_NO_VALUE"; list.seriesReferenceTime = "MODULE_INIT_NO_VALUE"; list.injectionTime = "MODULE_INIT_NO_VALUE"; list.decayCorrection = "MODULE_INIT_NO_VALUE"; list.decayFactor = "MODULE_INIT_EMPTY_ID"; list.radionuclideHalfLife = "MODULE_INIT_NO_VALUE"; list.frameReferenceTime = "MODULE_INIT_NO_VALUE"; list.weightUnits = "kg"; list.correctedImage = "MODULE_INIT_NO_VALUE"; list.maxPixelValue = itk::NumericTraits< short >::min(); list.outputVolumeRequested = (SUVBWName!="" || SUVBSAName!="" || SUVLBMName!="" || SUVIBWName!=""); try { // pass the input parameters to the helper method list.PETDICOMPath = PETDICOMPath; list.PETSeriesInstanceUID = PETSeriesInstanceUID; list.seriesDescription = seriesDescription; list.seriesNumber = seriesNumber; list.instanceNumber = instanceNumber; // GenerateCLP makes a temporary file with the path saved to // returnParameterFile, write the output strings in there as key = value pairs list.returnParameterFile = returnParameterFile; if(LoadImagesAndComputeSUV( list ) != 
EXIT_FAILURE){ if (RWVDICOMPath!="" || RWVMFile!="") { // produce RWVM file std::vector<DSRCodedEntryValue> measurementsUnitsList; std::vector<std::string> measurementsList; std::stringstream SUVbwSStream, SUVlbmSStream, SUVbsaSStream, SUVibwSStream; if(list.SUVbwConversionFactor!=0.0) { SUVbwSStream << list.SUVbwConversionFactor; measurementsUnitsList.push_back(DSRCodedEntryValue("{SUVbw}g/ml","UCUM","Standardized Uptake Value body weight")); measurementsList.push_back(SUVbwSStream.str()); } if(list.SUVlbmConversionFactor!=0.0) { SUVlbmSStream << list.SUVlbmConversionFactor; measurementsUnitsList.push_back(DSRCodedEntryValue("{SUVlbm}g/ml","UCUM","Standardized Uptake Value lean body mass")); measurementsList.push_back(SUVlbmSStream.str()); } if(list.SUVbsaConversionFactor!=0.0) { SUVbsaSStream << list.SUVbsaConversionFactor; measurementsUnitsList.push_back(DSRCodedEntryValue("{SUVbsa}cm2/ml","UCUM","Standardized Uptake Value body surface area")); measurementsList.push_back(SUVbsaSStream.str()); } if(list.SUVibwConversionFactor!=0.0) { SUVibwSStream << list.SUVibwConversionFactor; measurementsUnitsList.push_back(DSRCodedEntryValue("{SUVibw}g/ml","UCUM","Standardized Uptake Value ideal body weight")); measurementsList.push_back(SUVibwSStream.str()); } ExportRWV(list, measurementsUnitsList, measurementsList, RWVDICOMPath.c_str(), RWVMFile); } if (list.outputVolumeRequested) { // write SUV normalized volume(s) if (SUVBWName!="") { if (list.SUVbwConversionFactor==0.0) std::cerr << "WARNING: Can't compute SUV body weight and produce normalized volume." << std::endl; else WriteNormalizedImage(list.unnormalizedVolume, SUVBWName, list.SUVbwConversionFactor); } if (SUVLBMName!="") { if (list.SUVlbmConversionFactor==0.0) std::cerr << "WARNING: Can't compute SUV lean body mass and produce normalized volume." 
<< std::endl; else WriteNormalizedImage(list.unnormalizedVolume, SUVLBMName, list.SUVlbmConversionFactor); } if (SUVBSAName!="") { if (list.SUVbsaConversionFactor==0.0) std::cerr << "WARNING: Can't compute SUV body surface area and produce normalized volume." << std::endl; else WriteNormalizedImage(list.unnormalizedVolume, SUVBSAName, list.SUVbsaConversionFactor); } if (SUVIBWName!="") { if (list.SUVibwConversionFactor==0.0) std::cerr << "WARNING: Can't compute SUV ideal body weight and produce normalized volume." << std::endl; else WriteNormalizedImage(list.unnormalizedVolume, SUVIBWName, list.SUVibwConversionFactor); } } if (list.returnParameterFile!="") { std::cout << "saving numbers to " << returnParameterFile << std::endl; std::ofstream writeFile; writeFile.open( list.returnParameterFile.c_str() ); writeFile << "radioactivityUnits = " << list.radioactivityUnits.c_str() << std::endl; writeFile << "weightUnits = " << list.weightUnits.c_str() << std::endl; writeFile << "heightUnits = " << list.heightUnits.c_str() << std::endl; writeFile << "volumeUnits = " << list.volumeUnits.c_str() << std::endl; writeFile << "injectedDose = " << list.injectedDose << std::endl; //writeFile << "calibrationFactor = " << list.calibrationFactor << std::endl; writeFile << "patientWeight = " << list.patientWeight << std::endl; writeFile << "patientHeight = " << list.patientHeight << std::endl; writeFile << "patientSex = " << list.patientSex.c_str() << std::endl; writeFile << "seriesReferenceTime = " << list.seriesReferenceTime.c_str() << std::endl; writeFile << "injectionTime = " << list.injectionTime.c_str() << std::endl; writeFile << "decayCorrection = " << list.decayCorrection.c_str() << std::endl; writeFile << "decayFactor = " << list.decayFactor.c_str() << std::endl; writeFile << "radionuclideHalfLife = " << list.radionuclideHalfLife.c_str() << std::endl; writeFile << "frameReferenceTime = " << list.frameReferenceTime.c_str() << std::endl; writeFile << "SUVbwConversionFactor = " 
<< list.SUVbwConversionFactor << std::endl; writeFile << "SUVlbmConversionFactor = " << list.SUVlbmConversionFactor << std::endl; writeFile << "SUVbsaConversionFactor = " << list.SUVbsaConversionFactor << std::endl; writeFile << "SUVibwConversionFactor = " << list.SUVibwConversionFactor << std::endl; writeFile << "RWVMFile = " << list.RWVMFile << std::endl; writeFile.close(); } std::cout << "SUVbwConversionFactor = " << list.SUVbwConversionFactor << std::endl; std::cout << "SUVlbmConversionFactor = " << list.SUVlbmConversionFactor << std::endl; std::cout << "SUVbsaConversionFactor = " << list.SUVbsaConversionFactor << std::endl; std::cout << "SUVibwConversionFactor = " << list.SUVibwConversionFactor << std::endl; } else { std::cerr << "ERROR: Failed to compute SUV" << std::endl; return EXIT_FAILURE; } } catch( itk::ExceptionObject & excep ) { std::cerr << argv[0] << ": exception caught !" << std::endl; std::cerr << excep << std::endl; return EXIT_FAILURE; } return EXIT_SUCCESS; }
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/dcmHelpersCommon.h
.h
2,845
65
#ifndef __dcmHelpersCommon_h #define __dcmHelpersCommon_h #include <vector> class DcmItem; class DcmTagKey; class DcmDataset; class DSRDocument; class DSRCodedEntryValue; class dcmHelpersCommon { protected: static const DcmTagKey patientModuleTags[]; static const DcmTagKey clinicalTrialSubjectModuleTags[]; static const DcmTagKey generalStudyModuleTags[]; static const DcmTagKey patientStudyModuleTags[]; static const DcmTagKey generalSeriesModuleTags[]; static const DcmTagKey generalEquipmentModuleTags[]; static const DcmTagKey frameOfReferenceModuleTags[]; static const DcmTagKey sopCommonModuleTags[]; static const DcmTagKey generalImageModuleTags[]; static const DcmTagKey srDocumentGeneralModuleTags[]; public: static void copyElement(const DcmTagKey, DcmDataset *src, DcmDataset *dest); static void copyPatientModule(DcmDataset *src, DcmDataset *dest); static void copyClinicalTrialSubjectModule(DcmDataset *src, DcmDataset *dest); static void copyGeneralStudyModule(DcmDataset *src, DcmDataset *dest); static void copyPatientStudyModule(DcmDataset *src, DcmDataset *dest); static void copyGeneralSeriesModule(DcmDataset *src, DcmDataset *dest); static void copyGeneralEquipmentModule(DcmDataset *src, DcmDataset *dest); static void copyFrameOfReferenceModule(DcmDataset *src, DcmDataset *dest); static void copySOPCommonModule(DcmDataset *src, DcmDataset *dest); static void copyGeneralImageModule(DcmDataset *src, DcmDataset *dest); static void copySRDocumentGeneralModule(DcmDataset *src, DcmDataset *dest); //static void copyItems(DcmDataset *src, DcmDataset *dest); // functions to initialize specific templates; return to the same level in the input // -- TID 4020 "CAD Image Library Entry Template" // this function adds an static void addImageLibraryEntry(DSRDocument*, DcmDataset*); // -- TID 1204 "Language of Content Item and Descendants" static void addLanguageOfContent(DSRDocument*); // -- TID 1001 "Observation context" static void addObservationContext(DSRDocument*); // -- 
TID 1002 "Observer context" static void addObserverContext(DSRDocument*,const char* deviceObserverUID = "", const char* deviceObserverName ="", const char* deviceObserverManufacturer = "", const char* deviceObserverModelName ="", const char* deviceObserverSerialNumber = ""); // -- TID 1005 "Procedure context" static void addProcedureContext(DSRDocument*); // -- TID 1006 "Subject context" static void addSubjectContext(DSRDocument*); //static void addCurrentRequestedProcedureEvidenceSequence(std::vector<DcmDataset*>& , // DcmDataset*); }; #endif
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/itkDCMTKFileReader.h
.h
12,563
372
/*========================================================================= * * Copyright Insight Software Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *=========================================================================*/ #ifndef __itkDCMTKFileReader_h #define __itkDCMTKFileReader_h #include <stack> #include <vector> #include "itkByteSwapper.h" #include "itkIntTypes.h" #include "vnl/vnl_vector.h" #include "dcmtk/dcmdata/dcxfer.h" #include "dcmtk/dcmdata/dcvrds.h" #include "dcmtk/dcmdata/dcstack.h" #include "dcmtk/dcmdata/dcdatset.h" #include "itkMacro.h" #include "itkImageIOBase.h" class DcmSequenceOfItems; class DcmFileFormat; class DcmDictEntry; // Don't print error messages if you're not throwing // an exception // std::cerr body; #define DCMTKException(body) \ { \ if(throwException) \ { \ itkGenericExceptionMacro(body); \ } \ else \ { \ return EXIT_FAILURE; \ } \ } namespace itk { class DCMTKSequence { public: DCMTKSequence() : m_DcmSequenceOfItems(0) {} void SetDcmSequenceOfItems(DcmSequenceOfItems *seq); int card(); int GetSequence(unsigned long index, DCMTKSequence &target,bool throwException = true); int GetStack(unsigned short group, unsigned short element, DcmStack *resultStack, bool throwException = true); int GetElementCS(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetElementFD(unsigned short group, unsigned short element, double * &target, bool throwException = true); int 
GetElementFD(unsigned short group, unsigned short element, double &target, bool throwException = true); int GetElementDS(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetElementTM(unsigned short group, unsigned short element, std::string &target, bool throwException = true); /** Get an array of data values, as contained in a DICOM * DecimalString Item */ template <typename TType> int GetElementDS(unsigned short group, unsigned short element, unsigned short count, TType *target, bool throwException) { DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmDecimalString *dsItem = dynamic_cast<DcmDecimalString *>(resultStack.top()); if(dsItem == 0) { DCMTKException(<< "Can't get DecimalString Element at tag " << std::hex << group << " " << element << std::dec); } OFVector<Float64> doubleVals; if(dsItem->getFloat64Vector(doubleVals) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } if(doubleVals.size() != count) { DCMTKException(<< "DecimalString " << std::hex << group << " " << std::hex << element << " expected " << count << "items, but found " << doubleVals.size() << std::dec); } for(unsigned i = 0; i < count; i++) { target[i] = static_cast<TType>(doubleVals[i]); } return EXIT_SUCCESS; } int GetElementSQ(unsigned short group, unsigned short element, DCMTKSequence &target, bool throwException = true); private: DcmSequenceOfItems *m_DcmSequenceOfItems; }; class DCMTKFileReader { public: typedef DCMTKFileReader Self; DCMTKFileReader() : m_DFile(0), m_Dataset(0), m_Xfer(EXS_Unknown), m_FrameCount(0), m_FileNumber(-1L) { } ~DCMTKFileReader(); void SetFileName(const std::string &fileName); const std::string &GetFileName() const; void LoadFile(); int GetElementLO(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetElementLO(unsigned short group, unsigned short 
element, std::vector<std::string> &target, bool throwException = true); /** Get an array of data values, as contained in a DICOM * DecimalString Item */ template <typename TType> int GetElementDS(unsigned short group, unsigned short element, unsigned short count, TType *target, bool throwException = true) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmDecimalString *dsItem = dynamic_cast<DcmDecimalString *>(el); if(dsItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } OFVector<Float64> doubleVals; if(dsItem->getFloat64Vector(doubleVals) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } if(doubleVals.size() != count) { DCMTKException(<< "DecimalString " << std::hex << group << " " << std::hex << element << " expected " << count << "items, but found " << doubleVals.size() << std::dec); } for(unsigned i = 0; i < count; i++) { target[i] = static_cast<TType>(doubleVals[i]); } return EXIT_SUCCESS; } template <typename TType> int GetElementDSorOB(unsigned short group, unsigned short element, TType &target, bool throwException = true) { if(this->GetElementDS<TType>(group,element,1,&target,false) == EXIT_SUCCESS) { return EXIT_SUCCESS; } std::string val; if(this->GetElementOB(group,element,val) != EXIT_SUCCESS) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } const char *data = val.c_str(); const TType *fptr = reinterpret_cast<const TType *>(data); target = *fptr; switch(this->GetTransferSyntax()) { case EXS_LittleEndianImplicit: case EXS_LittleEndianExplicit: itk::ByteSwapper<TType>::SwapFromSystemToLittleEndian(&target); break; case EXS_BigEndianImplicit: case 
EXS_BigEndianExplicit: itk::ByteSwapper<TType>::SwapFromSystemToBigEndian(&target); break; default: break; } return EXIT_SUCCESS; } /** Get a DecimalString Item as a single string */ int GetElementDS(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetElementFD(unsigned short group, unsigned short element, double &target, bool throwException = true); int GetElementFD(unsigned short group, unsigned short element, double * &target, bool throwException = true); int GetElementFL(unsigned short group, unsigned short element, float &target, bool throwException = true); int GetElementFLorOB(unsigned short group, unsigned short element, float &target, bool throwException = true); int GetElementUS(unsigned short group, unsigned short element, unsigned short &target, bool throwException = true); int GetElementUS(unsigned short group, unsigned short element, unsigned short *&target, bool throwException = true); /** Get a DecimalString Item as a single string */ int GetElementCS(unsigned short group, unsigned short element, std::string &target, bool throwException = true); /** Get a PersonName Item as a single string */ int GetElementPN(unsigned short group, unsigned short element, std::string &target, bool throwException = true); /** get an IS (Integer String Item */ int GetElementIS(unsigned short group, unsigned short element, ::itk::int32_t &target, bool throwException = true); int GetElementISorOB(unsigned short group, unsigned short element, ::itk::int32_t &target, bool throwException = true); /** get an OB OtherByte Item */ int GetElementOB(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetElementSQ(unsigned short group, unsigned short entry, DCMTKSequence &sequence, bool throwException = true); int GetElementUI(unsigned short group, unsigned short entry, std::string &target, bool throwException = true); int GetElementDA(unsigned short group, unsigned short element, 
std::string &target, bool throwException = true); int GetElementTM(unsigned short group, unsigned short element, std::string &target, bool throwException = true); int GetDirCosines(vnl_vector<double> &dir1, vnl_vector<double> &dir2, vnl_vector<double> &dir3); int GetFrameCount() const; int GetSlopeIntercept(double &slope, double &intercept); int GetDimensions(unsigned short &rows, unsigned short &columns); ImageIOBase::IOComponentType GetImageDataType(); ImageIOBase::IOPixelType GetImagePixelType(); int GetSpacing(double *spacing); int GetOrigin(double *origin); E_TransferSyntax GetTransferSyntax() const; long GetFileNumber() const; static void AddDictEntry(DcmDictEntry *entry); private: static unsigned ascii2hex(char c); static std::string ConvertFromOB(OFString &toConvert); std::string m_FileName; DcmFileFormat* m_DFile; DcmDataset * m_Dataset; E_TransferSyntax m_Xfer; Sint32 m_FrameCount; long m_FileNumber; }; extern bool CompareDCMTKFileReaders(DCMTKFileReader *a, DCMTKFileReader *b); } #endif // __itkDCMTKFileReader_h
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/itkDCMTKFileReader.cxx
.cxx
34,168
1,206
/*========================================================================= * * Copyright Insight Software Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *=========================================================================*/ #include "itkDCMTKFileReader.h" #undef HAVE_SSTREAM // 'twould be nice if people coded without using // incredibly generic macro names #include "dcmtk/config/osconfig.h" // make sure OS specific configuration is included first #define INCLUDE_CSTDIO #define INCLUDE_CSTRING #include <dcmtk/dcmdata/dcuid.h> #include "dcmtk/dcmdata/dcdict.h" // For DcmDataDictionary #include "dcmtk/dcmdata/dcsequen.h" /* for DcmSequenceOfItems */ #include "dcmtk/dcmdata/dcvrcs.h" /* for DcmCodeString */ #include "dcmtk/dcmdata/dcvrfd.h" /* for DcmFloatingPointDouble */ #include "dcmtk/dcmdata/dcvrfl.h" /* for DcmFloatingPointDouble */ #include "dcmtk/dcmdata/dcvrus.h" /* for DcmUnsignedShort */ #include "dcmtk/dcmdata/dcvris.h" /* for DcmIntegerString */ #include "dcmtk/dcmdata/dcvrobow.h" /* for DcmOtherByteOtherWord */ #include "dcmtk/dcmdata/dcvrui.h" /* for DcmUniqueIdentifier */ #include "dcmtk/dcmdata/dcfilefo.h" /* for DcmFileFormat */ #include "dcmtk/dcmdata/dcdeftag.h" /* for DCM_NumberOfFrames */ #include "dcmtk/dcmdata/dcvrlo.h" /* for DcmLongString */ #include "dcmtk/dcmdata/dcvrtm.h" /* for DCMTime */ #include "dcmtk/dcmdata/dcvrda.h" /* for DcmDate */ #include "dcmtk/dcmdata/dcvrpn.h" /* for DcmPersonName */ // #include "diregist.h" /* 
include to support color images */ #include "vnl/vnl_cross.h" namespace itk { void DCMTKSequence ::SetDcmSequenceOfItems(DcmSequenceOfItems *seq) { this->m_DcmSequenceOfItems = seq; } int DCMTKSequence ::card() { return this->m_DcmSequenceOfItems->card(); } int DCMTKSequence ::GetSequence(unsigned long index, DCMTKSequence &target, bool throwException) { DcmItem *item = this->m_DcmSequenceOfItems->getItem(index); DcmSequenceOfItems *sequence = dynamic_cast<DcmSequenceOfItems *>(item); if(sequence == 0) { DCMTKException(<< "Can't find DCMTKSequence at index " << index); } target.SetDcmSequenceOfItems(sequence); return EXIT_SUCCESS; } int DCMTKSequence ::GetStack(unsigned short group, unsigned short element, DcmStack *resultStack, bool throwException) { DcmTagKey tagkey(group,element); if(this->m_DcmSequenceOfItems->search(tagkey,*resultStack) != EC_Normal) { DCMTKException(<< "Can't find tag " << std::hex << group << " " << element << std::dec); } return EXIT_SUCCESS; } int DCMTKSequence ::GetElementCS(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmCodeString *codeStringElement = dynamic_cast<DcmCodeString *>(resultStack.top()); if(codeStringElement == 0) { DCMTKException(<< "Can't get CodeString Element at tag " << std::hex << group << " " << element << std::dec); } OFString ofString; if(codeStringElement->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get OFString Value at tag " << std::hex << group << " " << element << std::dec); } target = ""; for(unsigned j = 0; j < ofString.length(); ++j) { target += ofString[j]; } return EXIT_SUCCESS; } int DCMTKSequence:: GetElementFD(unsigned short group, unsigned short element, double * &target, bool throwException) { DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmFloatingPointDouble *fdItem = dynamic_cast<DcmFloatingPointDouble *>(resultStack.top()); if(fdItem == 0) { 
DCMTKException(<< "Can't get CodeString Element at tag " << std::hex << group << " " << element << std::dec); } if(fdItem->getFloat64Array(target) != EC_Normal) { DCMTKException(<< "Can't get floatarray Value at tag " << std::hex << group << " " << element << std::dec); } return EXIT_SUCCESS; } int DCMTKSequence ::GetElementFD(unsigned short group, unsigned short element, double &target, bool throwException) { double *array; this->GetElementFD(group,element,array,throwException); target = array[0]; return EXIT_SUCCESS; } int DCMTKSequence ::GetElementDS(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmDecimalString *decimalStringElement = dynamic_cast<DcmDecimalString *>(resultStack.top()); if(decimalStringElement == 0) { DCMTKException(<< "Can't get DecimalString Element at tag " << std::hex << group << " " << element << std::dec); } OFString ofString; if(decimalStringElement->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get DecimalString Value at tag " << std::hex << group << " " << element << std::dec); } target = ""; for(unsigned j = 0; j < ofString.length(); ++j) { target += ofString[j]; } return EXIT_SUCCESS; } int DCMTKSequence ::GetElementSQ(unsigned short group, unsigned short element, DCMTKSequence &target, bool throwException) { DcmTagKey tagkey(group,element); DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmSequenceOfItems *seqElement = dynamic_cast<DcmSequenceOfItems *>(resultStack.top()); if(seqElement == 0) { DCMTKException(<< "Can't get at tag " << std::hex << group << " " << element << std::dec); } target.SetDcmSequenceOfItems(seqElement); return EXIT_SUCCESS; } int DCMTKSequence ::GetElementTM(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmStack resultStack; this->GetStack(group,element,&resultStack); DcmTime *dcmTime 
= dynamic_cast<DcmTime *>(resultStack.top()); if(dcmTime == 0) { DCMTKException(<< "Can't get at tag " << std::hex << group << " " << element << std::dec); } char *cs; dcmTime->getString(cs); target = cs; return EXIT_SUCCESS; } DCMTKFileReader ::~DCMTKFileReader() { delete m_DFile; } void DCMTKFileReader ::SetFileName(const std::string &fileName) { this->m_FileName = fileName; } const std::string & DCMTKFileReader ::GetFileName() const { return this->m_FileName; } void DCMTKFileReader ::LoadFile() { if(this->m_FileName == "") { itkGenericExceptionMacro(<< "No filename given" ); } if(this->m_DFile != 0) { delete this->m_DFile; } this->m_DFile = new DcmFileFormat(); OFCondition cond = this->m_DFile->loadFile(this->m_FileName.c_str()); // /* transfer syntax, autodetect */ // EXS_Unknown, // /* group length */ // EGL_noChange, // /* Max read length */ // 1024, // should be big // // enough for // // header stuff but // // prevent reading // // image data. // /* file read mode */ // ERM_fileOnly); if(cond != EC_Normal) { itkGenericExceptionMacro(<< cond.text() << ": reading file " << this->m_FileName); } this->m_Dataset = this->m_DFile->getDataset(); this->m_Xfer = this->m_Dataset->getOriginalXfer(); if(this->m_Dataset->findAndGetSint32(DCM_NumberOfFrames,this->m_FrameCount).bad()) { this->m_FrameCount = 1; } int fnum; this->GetElementIS(0x0020,0x0013,fnum); this->m_FileNumber = fnum; } int DCMTKFileReader ::GetElementLO(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmLongString *loItem = dynamic_cast<DcmLongString *>(el); if(loItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } OFString ofString; if(loItem->getOFStringArray(ofString) 
!= EC_Normal) { DCMTKException(<< "Cant get string from element " << std::hex << group << " " << std::hex << element << std::dec); } target = ""; for(unsigned i = 0; i < ofString.size(); i++) { target += ofString[i]; } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementLO(unsigned short group, unsigned short element, std::vector<std::string> &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmLongString *loItem = dynamic_cast<DcmLongString *>(el); if(loItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } target.clear(); OFString ofString; for(unsigned long i = 0; loItem->getOFString(ofString,i) == EC_Normal; ++i) { std::string targetStr = ""; for(unsigned j = 0; j < ofString.size(); j++) { targetStr += ofString[j]; } target.push_back(targetStr); } return EXIT_SUCCESS; } /** Get a DecimalString Item as a single string */ int DCMTKFileReader ::GetElementDS(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmDecimalString *dsItem = dynamic_cast<DcmDecimalString *>(el); if(dsItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } OFString ofString; if(dsItem->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get DecimalString Value at tag " << std::hex << group << " " << element << std::dec); } target = ""; for(unsigned j = 0; j < ofString.length(); ++j) { target += ofString[j]; } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementFD(unsigned 
short group, unsigned short element, double &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmFloatingPointDouble *fdItem = dynamic_cast<DcmFloatingPointDouble *>(el); if(fdItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } if(fdItem->getFloat64(target) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementFD(unsigned short group, unsigned short element, double * &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmFloatingPointDouble *fdItem = dynamic_cast<DcmFloatingPointDouble *>(el); if(fdItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } if(fdItem->getFloat64Array(target) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementFL(unsigned short group, unsigned short element, float &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmFloatingPointSingle *flItem = dynamic_cast<DcmFloatingPointSingle *>(el); if(flItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << 
std::dec); } if(flItem->getFloat32(target) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementFLorOB(unsigned short group, unsigned short element, float &target, bool throwException) { if(this->GetElementFL(group,element,target,false) == EXIT_SUCCESS) { return EXIT_SUCCESS; } std::string val; if(this->GetElementOB(group,element,val) != EXIT_SUCCESS) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } const char *data = val.c_str(); const float *fptr = reinterpret_cast<const float *>(data); target = *fptr; switch(this->GetTransferSyntax()) { case EXS_LittleEndianImplicit: case EXS_LittleEndianExplicit: itk::ByteSwapper<float>::SwapFromSystemToLittleEndian(&target); break; case EXS_BigEndianImplicit: case EXS_BigEndianExplicit: itk::ByteSwapper<float>::SwapFromSystemToBigEndian(&target); break; default: break; } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementUS(unsigned short group, unsigned short element, unsigned short &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmUnsignedShort *usItem = dynamic_cast<DcmUnsignedShort *>(el); if(usItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } if(usItem->getUint16(target) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementUS(unsigned short group, unsigned short element, unsigned short *&target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; 
if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmUnsignedShort *usItem = dynamic_cast<DcmUnsignedShort *>(el); if(usItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } if(usItem->getUint16Array(target) != EC_Normal) { DCMTKException(<< "Cant extract Array from DecimalString " << std::hex << group << " " << std::hex << element << std::dec); } return EXIT_SUCCESS; } /** Get a DecimalString Item as a single string */ int DCMTKFileReader ::GetElementCS(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmCodeString *csItem = dynamic_cast<DcmCodeString *>(el); if(csItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } OFString ofString; if(csItem->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get DecimalString Value at tag " << std::hex << group << " " << element << std::dec); } target = ""; for(unsigned j = 0; j < ofString.length(); ++j) { target += ofString[j]; } return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementPN(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmPersonName *pnItem = dynamic_cast<DcmPersonName *>(el); if(pnItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } 
OFString ofString; if(pnItem->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get DecimalString Value at tag " << std::hex << group << " " << element << std::dec); } target = ""; for(unsigned j = 0; j < ofString.length(); ++j) { target += ofString[j]; } return EXIT_SUCCESS; } /** get an IS (Integer String Item */ int DCMTKFileReader ::GetElementIS(unsigned short group, unsigned short element, ::itk::int32_t &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmIntegerString *isItem = dynamic_cast<DcmIntegerString *>(el); if(isItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } Sint32 _target; // MSVC seems to have type conversion problems with // using int32_t as a an argument to getSint32 if(isItem->getSint32(_target) != EC_Normal) { DCMTKException(<< "Can't get DecimalString Value at tag " << std::hex << group << " " << element << std::dec); } target = static_cast< ::itk::int32_t>(_target); return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementISorOB(unsigned short group, unsigned short element, ::itk::int32_t &target, bool throwException) { if(this->GetElementIS(group,element,target,false) == EXIT_SUCCESS) { return EXIT_SUCCESS; } std::string val; if(this->GetElementOB(group,element,val,throwException) != EXIT_SUCCESS) { return EXIT_FAILURE; } const char *data = val.c_str(); const int *iptr = reinterpret_cast<const int *>(data); target = *iptr; switch(this->GetTransferSyntax()) { case EXS_LittleEndianImplicit: case EXS_LittleEndianExplicit: itk::ByteSwapper<int>::SwapFromSystemToLittleEndian(&target); break; case EXS_BigEndianImplicit: case EXS_BigEndianExplicit: itk::ByteSwapper<int>::SwapFromSystemToBigEndian(&target); break; default: // no idea what to do break; } 
return EXIT_SUCCESS; } /** get an OB OtherByte Item */ int DCMTKFileReader ::GetElementOB(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmOtherByteOtherWord *obItem = dynamic_cast<DcmOtherByteOtherWord *>(el); if(obItem == 0) { DCMTKException(<< "Cant find DecimalString element " << std::hex << group << " " << std::hex << element << std::dec); } OFString ofString; if(obItem->getOFStringArray(ofString) != EC_Normal) { DCMTKException(<< "Can't get OFString Value at tag " << std::hex << group << " " << element << std::dec); } target = Self::ConvertFromOB(ofString); return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementSQ(unsigned short group, unsigned short entry, DCMTKSequence &sequence, bool throwException) { DcmSequenceOfItems *seq; DcmTagKey tagKey(group,entry); if(this->m_Dataset->findAndGetSequence(tagKey,seq) != EC_Normal) { DCMTKException(<< "Can't find sequence " << std::hex << group << " " << std::hex << entry) } sequence.SetDcmSequenceOfItems(seq); return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementUI(unsigned short group, unsigned short entry, std::string &target, bool throwException) { DcmTagKey tagKey(group,entry); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagKey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << entry << std::dec); } DcmUniqueIdentifier *uiItem = dynamic_cast<DcmUniqueIdentifier *>(el); if(uiItem == 0) { DCMTKException(<< "Can't convert data item " << group << "," << entry); } OFString ofString; if(uiItem->getOFStringArray(ofString,0) != EC_Normal) { DCMTKException(<< "Can't get UID Value at tag " << std::hex << group << " " << std::hex << entry << std::dec); } target = ""; for(unsigned int j = 0; j < 
ofString.length(); ++j) { target.push_back(ofString[j]); } return EXIT_SUCCESS; } int DCMTKFileReader:: GetElementDA(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmDate *dcmDate = dynamic_cast<DcmDate *>(el); if(dcmDate == 0) { DCMTKException(<< "Can't get at tag " << std::hex << group << " " << element << std::dec); } char *cs; dcmDate->getString(cs); target = cs; return EXIT_SUCCESS; } int DCMTKFileReader ::GetElementTM(unsigned short group, unsigned short element, std::string &target, bool throwException) { DcmTagKey tagkey(group,element); DcmElement *el; if(this->m_Dataset->findAndGetElement(tagkey,el) != EC_Normal) { DCMTKException(<< "Cant find tag " << std::hex << group << " " << std::hex << element << std::dec); } DcmTime *dcmTime = dynamic_cast<DcmTime *>(el); if(dcmTime == 0) { DCMTKException(<< "Can't get at tag " << std::hex << group << " " << element << std::dec); } char *cs; dcmTime->getString(cs); target = cs; return EXIT_SUCCESS; } int DCMTKFileReader ::GetDirCosines(vnl_vector<double> &dir1, vnl_vector<double> &dir2, vnl_vector<double> &dir3) { double dircos[6]; DCMTKSequence planeSeq; if(this->GetElementDS(0x0020,0x0037,6,dircos,false) != EXIT_SUCCESS) { if(this->GetElementSQ(0x0020,0x9116,planeSeq,false) == EXIT_SUCCESS) { if(planeSeq.GetElementDS(0x0020,0x0037,6,dircos,false) != EXIT_SUCCESS) { return EXIT_FAILURE; } } } dir1[0] = dircos[0]; dir1[1] = dircos[1]; dir1[2] = dircos[2]; dir2[0] = dircos[3]; dir2[1] = dircos[4]; dir2[2] = dircos[5]; dir3 = vnl_cross_3d(dir1,dir2); return EXIT_SUCCESS; } int DCMTKFileReader ::GetSlopeIntercept(double &slope, double &intercept) { if(this->GetElementDS<double>(0x0028,1053,1,&slope,false) != EXIT_SUCCESS) { slope = 1.0; } 
if(this->GetElementDS<double>(0x0028,1052,1,&intercept,false) != EXIT_SUCCESS) { intercept = 0.0; } return EXIT_SUCCESS; } ImageIOBase::IOPixelType DCMTKFileReader ::GetImagePixelType() { unsigned short SamplesPerPixel; if(this->GetElementUS(0x0028,0x0100,SamplesPerPixel,false) != EXIT_SUCCESS) { return ImageIOBase::UNKNOWNPIXELTYPE; } ImageIOBase::IOPixelType pixelType; switch(SamplesPerPixel) { case 8: case 16: pixelType = ImageIOBase::SCALAR; break; case 24: pixelType = ImageIOBase::RGB; break; default: pixelType = ImageIOBase::VECTOR; } return pixelType; } ImageIOBase::IOComponentType DCMTKFileReader ::GetImageDataType() { unsigned short IsSigned; unsigned short BitsAllocated; unsigned short BitsStored; unsigned short HighBit; ImageIOBase::IOComponentType type = ImageIOBase::UNKNOWNCOMPONENTTYPE; if(this->GetElementUS(0x0028,0x0100,BitsAllocated,false) != EXIT_SUCCESS || this->GetElementUS(0x0028,0x0101,BitsStored,false) != EXIT_SUCCESS || this->GetElementUS(0x0028,0x0102,HighBit,false) != EXIT_SUCCESS || this->GetElementUS(0x0028,0x0103,IsSigned,false) != EXIT_SUCCESS) { return type; } double slope, intercept; this->GetSlopeIntercept(slope,intercept); switch( BitsAllocated ) { case 1: case 8: case 24: // RGB? 
if(IsSigned) { type = ImageIOBase::CHAR; } else { type = ImageIOBase::UCHAR; } break; case 12: case 16: if(IsSigned) { type = ImageIOBase::SHORT; } else { type = ImageIOBase::USHORT; } break; case 32: case 64: // Don't know what this actually means if(IsSigned) { type = ImageIOBase::LONG; } else { type = ImageIOBase::ULONG; } break; case 0: default: break; //assert(0); } return type; } int DCMTKFileReader ::GetDimensions(unsigned short &rows, unsigned short &columns) { if(this->GetElementUS(0x0028,0x0010,rows,false) != EXIT_SUCCESS || this->GetElementUS(0x0028,0x0011,columns,false) != EXIT_SUCCESS) { return EXIT_FAILURE; } return EXIT_SUCCESS; } int DCMTKFileReader ::GetSpacing(double *spacing) { double _spacing[3]; // // There are several tags that can have spacing, and we're going // from most to least desirable, starting with PixelSpacing, which // is guaranteed to be in patient space. // Imager Pixel spacing is inter-pixel spacing at the sensor front plane // Pixel spacing if((this->GetElementDS<double>(0x0028,0x0030,2,_spacing,false) != EXIT_SUCCESS && // imager pixel spacing this->GetElementDS<double>(0x0018, 0x1164, 2, &_spacing[0],false) != EXIT_SUCCESS && // Nominal Scanned PixelSpacing this->GetElementDS<double>(0x0018, 0x2010, 2, &_spacing[0],false) != EXIT_SUCCESS) || // slice thickness this->GetElementDS<double>(0x0018,0x0050,1,&_spacing[2],false) != EXIT_SUCCESS) { // that method failed, go looking for the spacing sequence DCMTKSequence spacingSequence; DCMTKSequence subSequence; DCMTKSequence subsubSequence; // first, shared function groups sequence, then // per-frame groups sequence if((this->GetElementSQ(0x5200,0x9229,spacingSequence,false) == EXIT_SUCCESS || this->GetElementSQ(0X5200,0X9230,spacingSequence,false) == EXIT_SUCCESS) && spacingSequence.GetSequence(0,subSequence,false) == EXIT_SUCCESS && subSequence.GetElementSQ(0x0028,0x9110,subsubSequence,false) == EXIT_SUCCESS) { if(subsubSequence.GetElementDS<double>(0x0028,0x0030,2,_spacing,false) 
!= EXIT_SUCCESS) { // Pixel Spacing _spacing[0] = _spacing[1] = 1.0; } if(subsubSequence.GetElementDS<double>(0x0018,0x0050,1,&_spacing[2],false) != EXIT_SUCCESS) { // Slice Thickness _spacing[2] = 1.0; } } else { // punt if no info found. _spacing[0] = _spacing[1] = _spacing[2] = 1.0; } } // // spacing is row spacing\column spacing // but a slice is width-first, i.e. columns increase fastest. // spacing[0] = _spacing[1]; spacing[1] = _spacing[0]; spacing[2] = _spacing[2]; return EXIT_SUCCESS; } int DCMTKFileReader ::GetOrigin(double *origin) { DCMTKSequence originSequence; DCMTKSequence subSequence; DCMTKSequence subsubSequence; if((this->GetElementSQ(0x5200,0x9229,originSequence,false) == EXIT_SUCCESS || this->GetElementSQ(0X5200,0X9239,originSequence,false) == EXIT_SUCCESS) && originSequence.GetSequence(0,subSequence,false) == EXIT_SUCCESS && subSequence.GetElementSQ(0x0028,0x9113,subsubSequence,false) == EXIT_SUCCESS) { subsubSequence.GetElementDS<double>(0x0020,0x0032,3,origin,true); return EXIT_SUCCESS; } this->GetElementDS<double>(0x0020,0x0032,3,origin,true); return EXIT_SUCCESS; } int DCMTKFileReader ::GetFrameCount() const { return this->m_FrameCount; } E_TransferSyntax DCMTKFileReader ::GetTransferSyntax() const { return m_Xfer; } long DCMTKFileReader ::GetFileNumber() const { return m_FileNumber; } void DCMTKFileReader ::AddDictEntry(DcmDictEntry *entry) { DcmDataDictionary &dict = dcmDataDict.wrlock(); dict.addEntry(entry); #if OFFIS_DCMTK_VERSION_NUMBER < 364 dcmDataDict.unlock(); #else dcmDataDict.rdunlock(); #endif } unsigned DCMTKFileReader ::ascii2hex(char c) { switch(c) { case '0': return 0; case '1': return 1; case '2': return 2; case '3': return 3; case '4': return 4; case '5': return 5; case '6': return 6; case '7': return 7; case '8': return 8; case '9': return 9; case 'a': case 'A': return 10; case 'b': case 'B': return 11; case 'c': case 'C': return 12; case 'd': case 'D': return 13; case 'e': case 'E': return 14; case 'f': case 'F': return 
15; } return 255; // should never happen } std::string DCMTKFileReader ::ConvertFromOB(OFString &toConvert) { // string format is nn\nn\nn... std::string rval; for(size_t pos = 0; pos < toConvert.size(); pos += 3) { unsigned char convert[2]; convert[0] = Self::ascii2hex(toConvert[pos]); convert[1] = Self::ascii2hex(toConvert[pos+1]); unsigned char conv = convert[0] << 4; conv += convert[1]; rval.push_back(static_cast<unsigned char>(conv)); } return rval; } bool CompareDCMTKFileReaders(DCMTKFileReader *a, DCMTKFileReader *b) { return a->GetFileNumber() < b->GetFileNumber(); } }
Unknown
3D
QIICR/Slicer-PETDICOMExtension
SUVFactorCalculatorCLI/Testing/Cxx/SUVFactorCalculatorTest.cxx
.cxx
431
26
#if defined(_MSC_VER) #pragma warning ( disable : 4786 ) #endif #ifdef __BORLANDC__ #define ITK_LEAN_AND_MEAN #endif #include "itkTestMain.h" // STD includes #include <iostream> #ifdef WIN32 # define MODULE_IMPORT __declspec(dllimport) #else # define MODULE_IMPORT #endif extern "C" MODULE_IMPORT int ModuleEntryPoint(int, char* []); void RegisterTests() { StringToTestFunctionMap["ModuleEntryPoint"] = ModuleEntryPoint; }
Unknown
3D
QIICR/Slicer-PETDICOMExtension
Testing/PETDicomExtensionSelfTest.py
.py
12,910
287
import os import unittest import vtk, qt, ctk, slicer, logging from DICOMLib import DICOMUtils from slicer.ScriptedLoadableModule import * if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): import pydicom else: import dicom # # PETDicomExtensionSelfTest # class PETDicomExtensionSelfTest(ScriptedLoadableModule): def __init__(self, parent): ScriptedLoadableModule.__init__(self, parent) self.parent.title = "PETDicomExtensionSelfTest" self.parent.categories = ["Testing.TestCases"] self.parent.dependencies = [] self.parent.contributors = ["Christian Bauer (University of Iowa)"] self.parent.helpText = """This is a self test for PET DICOM plugins.""" parent.acknowledgementText = """This work was partially funded by NIH grants U01-CA140206 and U24-CA180918.""" # Add this test to the SelfTest module's list for discovery when the module # is created. Since this module may be discovered before SelfTests itself, # create the list if it doesn't already exist. try: slicer.selfTests except AttributeError: slicer.selfTests = {} slicer.selfTests['PETDicomExtensionSelfTest'] = self.runTest def runTest(self): tester = PETDicomExtensionSelfTestTest() tester.runTest() # # PETDicomExtensionSelfTestWidget # class PETDicomExtensionSelfTestWidget(ScriptedLoadableModuleWidget): def setup(self): ScriptedLoadableModuleWidget.setup(self) # # PETDicomExtensionSelfTestLogic # class PETDicomExtensionSelfTestLogic(ScriptedLoadableModuleLogic): """This class should implement all the actual computation done by your module. The interface should be such that other python code can import this class and make use of the functionality without requiring an instance of the Widget """ def __init__(self): pass class PETDicomExtensionSelfTestTest(ScriptedLoadableModuleTest): """ This is the test case for your scripted module. 
""" # ------------------------------------------------------------------------------ def setUp(self): """ Do whatever is needed to reset the state - typically a scene clear will be enough. """ self.UID = '1.3.6.1.4.1.14519.5.2.1.2744.7002.886851941687931416391879144903' self.PatientName = 'QIN-HEADNECK-01-0139' self.tempDicomDatabase = os.path.join(slicer.app.temporaryPath,'PETTest') slicer.mrmlScene.Clear(0) self.originalDicomDatabase = DICOMUtils.openTemporaryDatabase(self.tempDicomDatabase) # ------------------------------------------------------------------------------ def doCleanups(self): """ cleanup temporary data in case an exception occurs """ self.tearDown() # ------------------------------------------------------------------------------ def tearDown(self): """ Close temporary DICOM database and remove temporary data """ import shutil if self.originalDicomDatabase: DICOMUtils.closeTemporaryDatabase(self.originalDicomDatabase, True) shutil.rmtree(self.tempDicomDatabase) # closeTemporaryDatabase cleanup doesn't work. We need to do it manually self.originalDicomDatabase = None def runTest(self): """Run as few or as many tests as needed here. 
""" self.setUp() self.test_SUVFactorCalculatorCLI() self.test_PETDicomExtensionSelfTest_Main() self.tearDown() def test_SUVFactorCalculatorCLI(self): """ test PET SUV Factor Calculator CLI """ self.delayDisplay('Checking for SUV Factor Calculator CLI') self.assertTrue(hasattr(slicer.modules,"suvfactorcalculator")) self.delayDisplay('Adding PET DICOM dataset (including download if necessary)') self._downloadTestData() fileList = [os.path.join(self.tempDicomDatabase,f) for f in os.listdir(self.tempDicomDatabase) if (f.endswith('.dcm') and len(f)==10)] import tempfile, shutil #cliTempDir = os.path.join(self.tempDicomDatabase,"cli"); os.makedirs(cliTempDir,exist_ok=True) cliTempDir = tempfile.mkdtemp() for inputFilePath in fileList: destFile = os.path.join(cliTempDir,os.path.split(inputFilePath)[1]) shutil.copyfile(inputFilePath, destFile) cliOutDir = os.path.join(cliTempDir,'out') os.makedirs(cliOutDir,exist_ok=True) self.delayDisplay('Testing generation of RWVM file') parameters = {} parameters['PETDICOMPath'] = cliTempDir parameters['RWVDICOMPath'] = cliOutDir SUVFactorCalculator = None SUVFactorCalculator = slicer.cli.run(slicer.modules.suvfactorcalculator, SUVFactorCalculator, parameters, wait_for_completion=True) self.assertEqual(SUVFactorCalculator.GetStatusString(), 'Completed') self.assertEqual(SUVFactorCalculator.GetParameterValue(1,14), '0.000401664') # SUVbwConversionFactor self.assertNotEqual(SUVFactorCalculator.GetParameterValue(2,1), '') # RWVMFile self.assertEqual(os.path.dirname(SUVFactorCalculator.GetParameterValue(2,1)), cliOutDir) # RWVMFile self.assertTrue(os.path.exists(SUVFactorCalculator.GetParameterValue(2,1))) # RWVMFile self.assertEqual(SUVFactorCalculator.GetParameterValue(3,0), '') # SUVBWName self.delayDisplay('Testing generation of SUV normalized volume') SUVBWName = os.path.join(cliOutDir,'SUVbw.nrrd') parameters = {} parameters['PETDICOMPath'] = cliTempDir parameters['SUVBWName'] = SUVBWName SUVFactorCalculator = None 
SUVFactorCalculator = slicer.cli.run(slicer.modules.suvfactorcalculator, SUVFactorCalculator, parameters, wait_for_completion=True) self.assertEqual(SUVFactorCalculator.GetStatusString(), 'Completed') self.assertEqual(SUVFactorCalculator.GetParameterValue(1,14), '0.000401664') # SUVbwConversionFactor self.assertEqual(SUVFactorCalculator.GetParameterValue(2,1), '') # RWVMFile self.assertEqual(SUVFactorCalculator.GetParameterValue(3,0), SUVBWName) # SUVBWName self.assertTrue(os.path.exists(SUVBWName)) # SUVBWName import SimpleITK as sitk img = sitk.ReadImage(SUVBWName) f = sitk.MinimumMaximumImageFilter() f.Execute(img) self.assertEqual(round(f.GetMaximum()),90.0) self.delayDisplay('Test passed!') # ------------------------------------------------------------------------------ def test_PETDicomExtensionSelfTest_Main(self): """ test PET SUV Plugin and DICOM RWVM creation """ self.delayDisplay('Checking for PET DICOM plugins') dicomWidget = slicer.modules.dicom.widgetRepresentation().self() if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 13): dicomPluginCheckbox = dicomWidget.pluginSelector.checkBoxByPlugin elif (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): dicomPluginCheckbox = dicomWidget.browserWidget.pluginSelector.checkBoxByPlugin else: dicomPluginCheckbox = dicomWidget.detailsPopup.pluginSelector.checkBoxByPlugin self.assertIn('DICOMPETSUVPlugin', dicomPluginCheckbox) self.assertIn('DICOMRWVMPlugin', dicomPluginCheckbox) self.delayDisplay('Adding PET DICOM dataset to DICOM database (including download if necessary)') self._downloadTestData() self.delayDisplay('Loading data with DICOMPETSUVPlugin') self._loadWithPlugin(self.UID, 'DICOMPETSUVPlugin') imageNode = slicer.mrmlScene.GetFirstNodeByClass('vtkMRMLScalarVolumeNode') self.assertIsNotNone(imageNode) self.delayDisplay('Testing properties of loaded SUV normalized data') self._testDataProperties(imageNode) self.delayDisplay('Testing DICOM 
database for created RWVM file') patientUID = DICOMUtils.getDatabasePatientUIDByPatientName(self.PatientName) studies = slicer.dicomDatabase.studiesForPatient(patientUID) series = slicer.dicomDatabase.seriesForStudy(studies[0]) RWVMSeries = None for serie in series: description = slicer.dicomDatabase.descriptionForSeries(serie) if description=='PET SUV Factors': RWVMSeries = serie self.assertIsNotNone(RWVMSeries) files = slicer.dicomDatabase.filesForSeries(RWVMSeries) self.assertTrue(len(files)>0) RWVMFile = files[0] print(RWVMFile) self.delayDisplay('Testing RealWorldValueSlope stored in RWVM file') if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): rwvm = pydicom.dcmread(RWVMFile) else: rwvm = dicom.read_file(RWVMFile) self.assertIn('ReferencedImageRealWorldValueMappingSequence', rwvm) rirwvms = rwvm.ReferencedImageRealWorldValueMappingSequence[0] self.assertIn('RealWorldValueMappingSequence', rirwvms) rwvms = rirwvms.RealWorldValueMappingSequence[0] self.assertIn('RealWorldValueSlope', rwvms) slope = rwvms.RealWorldValueSlope self.assertTrue(abs(slope-0.000401664)<0.00001) self.delayDisplay('Loading data with DICOMRWVMPlugin') slicer.mrmlScene.Clear(0) self._loadWithPlugin(RWVMSeries, 'DICOMRWVMPlugin') imageNode = slicer.mrmlScene.GetFirstNodeByClass('vtkMRMLScalarVolumeNode') self.assertIsNotNone(imageNode) self.delayDisplay('Testing properties of loaded SUV normalized data') self._testDataProperties(imageNode) self.delayDisplay('Test passed!') # ------------------------------------------------------------------------------ def _downloadTestData(self): """ download DICOM PET scan and add to DICOM database """ from six.moves.urllib.parse import urlparse, urlencode from six.moves.urllib.request import urlopen, urlretrieve from six.moves.urllib.error import HTTPError quantity = slicer.vtkCodedEntry() quantity.SetFromString('CodeValue:126400|CodingSchemeDesignator:DCM|CodeMeaning:Standardized Uptake Value') units = 
slicer.vtkCodedEntry() units.SetFromString('CodeValue:{SUVbw}g/ml|CodingSchemeDesignator:UCUM|CodeMeaning:Standardized Uptake Value body weight') url = 'http://github.com/QIICR/PETTumorSegmentation/releases/download/4.10.2/QIN-HEADNECK-01-0139-PET.zip' zipFile = 'QIN-HEADNECK-01-0139-PET.zip' suvNormalizationFactor = 0.00040166400000000007 destinationDirectory = self.tempDicomDatabase filePath = os.path.join(destinationDirectory, zipFile) # download dataset if necessary if not len(slicer.dicomDatabase.filesForSeries(self.UID)): filePath = os.path.join(destinationDirectory, zipFile) if not os.path.exists(os.path.dirname(filePath)): os.makedirs(os.path.dirname(filePath)) logging.debug('Saving download %s to %s ' % (url, filePath)) if not os.path.exists(filePath) or os.stat(filePath).st_size == 0: slicer.util.delayDisplay('Requesting download of %s...\n' % url, 1000) urlretrieve(url, filePath) if os.path.exists(filePath) and os.path.splitext(filePath)[1]=='.zip': success = slicer.app.applicationLogic().Unzip(filePath, destinationDirectory) if not success: logging.error("Archive %s was NOT unzipped successfully." 
% filePath) indexer = ctk.ctkDICOMIndexer() indexer.addDirectory(slicer.dicomDatabase, destinationDirectory, None) indexer.waitForImportFinished() # ------------------------------------------------------------------------------ def _loadWithPlugin(self, UID, pluginName): dicomWidget = slicer.modules.dicom.widgetRepresentation().self() if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 13): dicomPluginCheckbox = dicomWidget.pluginSelector.checkBoxByPlugin elif (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): dicomPluginCheckbox = dicomWidget.browserWidget.pluginSelector.checkBoxByPlugin else: dicomPluginCheckbox = dicomWidget.detailsPopup.pluginSelector.checkBoxByPlugin dicomPluginStates = {(key,value.checked) for key,value in dicomPluginCheckbox.items()} for cb in list(dicomPluginCheckbox.values()): cb.checked=False dicomPluginCheckbox[pluginName].checked = True success=DICOMUtils.loadSeriesByUID([UID]) for key,value in dicomPluginStates: dicomPluginCheckbox[key].checked=value # ------------------------------------------------------------------------------ def _testDataProperties(self, imageNode): units = imageNode.GetVoxelValueUnits() self.assertTrue(units.GetCodeMeaning()=='Standardized Uptake Value body weight') self.assertTrue(units.GetCodeValue()=='{SUVbw}g/ml') self.assertTrue(units.GetCodingSchemeDesignator()=='UCUM') quantity = imageNode.GetVoxelValueQuantity() self.assertTrue(quantity.GetCodeMeaning()=='Standardized Uptake Value') self.assertTrue(quantity.GetCodeValue()=='126400') self.assertTrue(quantity.GetCodingSchemeDesignator()=='DCM') scalarRange = imageNode.GetImageData().GetScalarRange() self.assertTrue(abs(scalarRange[0]-0)<0.01) self.assertTrue(abs(scalarRange[1]-89.85876418551707)<0.01)
Python
3D
QIICR/Slicer-PETDICOMExtension
DICOMPETSUVPlugin/DICOMPETSUVPlugin.py
.py
9,543
264
import os import sys as SYS from __main__ import vtk, qt, ctk, slicer from DICOMLib import DICOMPlugin from DICOMLib import DICOMLoadable if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): import pydicom else: import dicom import DICOMLib import math as math # # This is the plugin to handle PET SUV volumes # from DICOM files into MRML nodes. It follows the DICOM module's # plugin architecture. # class DICOMPETSUVPluginClass(DICOMPlugin): """ PET specific interpretation code """ def __init__(self): super(DICOMPETSUVPluginClass,self).__init__() #print "DICOMPETSUVPlugin __init__()" self.loadType = "PET SUV Plugin" self.tags['patientID'] = "0010,0020" self.tags['patientName'] = "0010,0010" self.tags['patientBirthDate'] = "0010,0030" self.tags['patientSex'] = "0010,0040" self.tags['patientHeight'] = "0010,1020" self.tags['patientWeight'] = "0010,1030" self.tags['relatedSeriesSequence'] = "0008,1250" self.tags['radioPharmaconStartTime'] = "0018,1072" self.tags['decayCorrection'] = "0054,1102" self.tags['decayFactor'] = "0054,1321" self.tags['frameRefTime'] = "0054,1300" self.tags['radionuclideHalfLife'] = "0018,1075" self.tags['contentTime'] = "0008,0033" self.tags['seriesTime'] = "0008,0031" self.tags['seriesDescription'] = "0008,103e" self.tags['seriesModality'] = "0008,0060" self.tags['seriesInstanceUID'] = "0020,000E" self.tags['sopInstanceUID'] = "0008,0018" self.tags['seriesInstanceUID'] = "0020,000e" self.tags['studyInstanceUID'] = "0020,000D" self.tags['studyDate'] = "0008,0020" self.tags['studyTime'] = "0008,0030" self.tags['studyID'] = "0020,0010" self.tags['rows'] = "0028,0010" self.tags['columns'] = "0028,0011" self.tags['spacing'] = "0028,0030" self.tags['position'] = "0020,0032" self.tags['orientation'] = "0020,0037" self.tags['pixelData'] = "7fe0,0010" self.tags['referencedImageRWVMappingSeq'] = "0008,1140" self.fileLists = [] self.patientName = "" self.patientBirthDate = "" self.patientSex = "" 
self.ctTerm = "CT" self.petTerm = "PT" self.scalarVolumePlugin = slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']() self.rwvPlugin = slicer.modules.dicomPlugins['DICOMRWVMPlugin']() def __getDirectoryOfImageSeries(self, sopInstanceUID): f = slicer.dicomDatabase.fileForInstance(sopInstanceUID) return os.path.dirname(f) def __getSeriesInformation(self,seriesFiles,dicomTag): if seriesFiles: return slicer.dicomDatabase.fileValue(seriesFiles[0],dicomTag) def examine(self,fileLists): """ Returns a list of DICOMLoadable instances corresponding to ways of interpreting the fileLists parameter. """ loadables = [] # get from cache or create new loadables for fileList in fileLists: cachedLoadables = self.getCachedLoadables(fileList) if cachedLoadables: loadables += cachedLoadables else: if slicer.dicomDatabase.fileValue(fileList[0],self.tags['seriesModality']) == "PT": # check if PET series already has Real World Value Mapping hasRWVM = False if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): ptFile = pydicom.dcmread(fileList[0]) else: ptFile = dicom.read_file(fileList[0]) studyUID = slicer.dicomDatabase.fileValue(fileList[0],self.tags['studyInstanceUID']) for series in slicer.dicomDatabase.seriesForStudy(studyUID): if ptFile.SeriesInstanceUID != series: for seriesFile in slicer.dicomDatabase.filesForSeries(series): if slicer.dicomDatabase.fileValue(seriesFile,self.tags['seriesModality']) == "RWV": if ptFile.SeriesInstanceUID == self.getReferencedSeriesInstanceUID(seriesFile): hasRWVM = True loadablesForFiles = self.rwvPlugin.getLoadablePetSeriesFromRWVMFile(seriesFile) for loadable in loadablesForFiles: loadable.confidence = 1.0 self.abbreviateLoadableName(loadable) loadables += loadablesForFiles self.cacheLoadables(fileList,loadablesForFiles) if not hasRWVM: # Call SUV Factor Calculator to create RWVM files for this PET series rwvmFile = self.generateRWVMforFileList(fileList) loadablesForFiles = 
self.rwvPlugin.getLoadablePetSeriesFromRWVMFile(rwvmFile) for loadable in loadablesForFiles: loadable.confidence = 0.95 self.abbreviateLoadableName(loadable) self.cacheLoadables(fileList,loadablesForFiles) # there may be multiple loadables per one RWV series, add it only # once. Note we only add RWV to the DB if we create a new RWV # instance. loadablesForFiles[0].derivedItems = [rwvmFile] loadables += loadablesForFiles return loadables def generateRWVMforFileList(self, fileList): """Return a list of loadables after generating Real World Value Mapping objects for a PET series """ loadables = [] # Call SUV Factor Calculator module sopInstanceUID = self.__getSeriesInformation(fileList, self.tags['sopInstanceUID']) seriesDirectory = self.__getDirectoryOfImageSeries(sopInstanceUID) # copy files to a temp location, since otherwise the command line can easily exceed # the maximum on Windows (~8k characters) import tempfile, shutil cliTempDir = os.path.join(tempfile.mkdtemp()) for inputFilePath in fileList: destFile = os.path.join(cliTempDir,os.path.split(inputFilePath)[1]) shutil.copyfile(inputFilePath, destFile) parameters = {} parameters['PETDICOMPath'] = cliTempDir parameters['RWVDICOMPath'] = seriesDirectory parameters['PETSeriesInstanceUID'] = self.__getSeriesInformation(fileList, self.tags['seriesInstanceUID']) SUVFactorCalculator = None SUVFactorCalculator = slicer.cli.run(slicer.modules.suvfactorcalculator, SUVFactorCalculator, parameters, wait_for_completion=True) shutil.rmtree(cliTempDir) if SUVFactorCalculator.GetStatusString() != 'Completed': raise RuntimeError("SUVFactorCalculator CLI did not complete cleanly") rwvFile = SUVFactorCalculator.GetParameterDefault(2,1) return rwvFile def getReferencedSeriesInstanceUID(self, rwvmFile): """Helper method to read the Referenced Series Instance UID from an RWVM file""" if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): dicomFile = pydicom.dcmread(rwvmFile) else: 
dicomFile = dicom.read_file(rwvmFile) refSeriesSeq = dicomFile.ReferencedSeriesSequence return refSeriesSeq[0].SeriesInstanceUID def abbreviateLoadableName(self, loadable): """Helper method to shorten the name of the SUV conversion """ if "Standardized Uptake Value body weight" in loadable.name: loadable.name = (loadable.name).replace('Standardized Uptake Value body weight','(SUVbw)') loadable.selected = True elif "Standardized Uptake Value ideal body weight" in loadable.name: loadable.name = (loadable.name).replace('Standardized Uptake Value ideal body weight','(SUVibw)') elif "Standardized Uptake Value lean body mass" in loadable.name: loadable.name = (loadable.name).replace('Standardized Uptake Value lean body mass','(SUVlbm)') elif "Standardized Uptake Value body surface area" in loadable.name: loadable.name = (loadable.name).replace('Standardized Uptake Value body surface area','(SUVbsa)') return def load(self,loadable): """Load the series into Slicer""" # Call the DICOMRWVMPlugin to get the image node imageNode = self.rwvPlugin.loadPetSeries(loadable) return imageNode # # DICOMPETSUVPlugin # class DICOMPETSUVPlugin: """ This class is the 'hook' for slicer to detect and recognize the plugin as a loadable scripted module """ def __init__(self, parent): parent.title = "DICOM PET SUV Volume Plugin" parent.categories = ["Developer Tools.DICOM Plugins"] parent.contributors = ["Ethan Ulrich (Univ. of Iowa)"] parent.helpText = """ Plugin to the DICOM Module to parse and load PET volumes from DICOM files. Provides options for standardized uptake values. No module interface here, only in the DICOM module """ parent.acknowledgementText = """ This DICOM Plugin was developed by Ethan Ulrich, Univ. of Iowa and was partially funded by NIH grant U24 CA180918. """ # don't show this module - it only appears in the DICOM module parent.hidden = True # Add this extension to the DICOM module's list for discovery when the module # is created. 
Since this module may be discovered before DICOM itself, # create the list if it doesn't already exist. try: slicer.modules.dicomPlugins except AttributeError: slicer.modules.dicomPlugins = {} slicer.modules.dicomPlugins['DICOMPETSUVPlugin'] = DICOMPETSUVPluginClass # # DICOMPETSUVWidget # class DICOMPETSUVWidget: def __init__(self, parent = None): self.parent = parent def setup(self): # don't display anything for this widget - it will be hidden anyway pass def enter(self): pass def exit(self): pass
Python
3D
QIICR/Slicer-PETDICOMExtension
DICOMRWVMPlugin/DICOMRWVMPlugin.py
.py
15,669
396
import os import sys as SYS from __main__ import vtk, qt, ctk, slicer from DICOMLib import DICOMPlugin from DICOMLib import DICOMLoadable if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): import pydicom else: import dicom import DICOMLib import math as math class CodedValueTuple: def __init__(self, CodeValue=None, CodeMeaning=None, CodingSchemeDesignator=None): self.CodeValue = CodeValue self.CodeMeaning = CodeMeaning self.CodingSchemeDesignator = CodingSchemeDesignator def getDictionary(self): return {"CodeValue":self.CodeValue, "CodeMeaning":self.CodeMeaning, "CodingSchemeDesignator":self.CodingSchemeDesignator} # # This is the plugin to handle Real World Value Mapping objects # from DICOM files into MRML nodes. It follows the DICOM module's # plugin architecture. # class DICOMRWVMPluginClass(DICOMPlugin): """ PET specific interpretation code """ def __init__(self): super(DICOMRWVMPluginClass,self).__init__() self.epsilon = 0.01 self.loadType = "Real World Value Mapping Plugin" self.tags['patientID'] = "0010,0020" self.tags['patientName'] = "0010,0010" self.tags['patientBirthDate'] = "0010,0030" self.tags['patientSex'] = "0010,0040" self.tags['patientHeight'] = "0010,1020" self.tags['patientWeight'] = "0010,1030" self.tags['referencedSeriesSequence'] = "0008,1115" self.tags['contentTime'] = "0008,0033" self.tags['seriesTime'] = "0008,0031" self.tags['triggerTime'] = "0018,1060" self.tags['diffusionGradientOrientation'] = "0018,9089" self.tags['imageOrientationPatient'] = "0020,0037" self.tags['numberOfFrames'] = "0028,0008" self.tags['seriesDescription'] = "0008,103e" self.tags['seriesModality'] = "0008,0060" self.tags['seriesInstanceUID'] = "0020,000E" self.tags['sopInstanceUID'] = "0008,0018" self.tags['studyInstanceUID'] = "0020,000D" self.tags['studyDate'] = "0008,0020" self.tags['studyTime'] = "0008,0030" self.tags['studyID'] = "0020,0010" self.tags['rows'] = "0028,0010" self.tags['columns'] = "0028,0011" 
self.tags['spacing'] = "0028,0030" self.tags['position'] = "0020,0032" self.tags['orientation'] = "0020,0037" self.tags['pixelData'] = "7fe0,0010" self.tags['referencedImageRWVMappingSeq'] = "0040,9094" self.scalarVolumePlugin = slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']() def __getDirectoryOfImageSeries(self, sopInstanceUID): f = slicer.dicomDatabase.fileForInstance(sopInstanceUID) return os.path.dirname(f) def __getSeriesInformation(self,seriesFiles,dicomTag): if seriesFiles: return slicer.dicomDatabase.fileValue(seriesFiles[0],dicomTag) def examine(self,fileLists): """ Returns a list of DICOMLoadable instances corresponding to ways of interpreting the fileLists parameter. """ loadables = [] # get from cache or create new loadables for fileList in fileLists: cachedLoadables = self.getCachedLoadables(fileList) if cachedLoadables: loadables += cachedLoadables else: if slicer.dicomDatabase.fileValue(fileList[0],self.tags['seriesModality']) == "RWV": if len(fileList)>1: # TODO: look into logging using ctkFileLog print('Warning: series contains more than 1 RWV instance! 
Only first one is considered!') loadablesForFiles = self.getLoadablesFromRWVMFile(fileList[0]) loadables += loadablesForFiles self.cacheLoadables(fileList[0],loadablesForFiles) return loadables def getLoadablesFromRWVMFile(self, file): rwvLoadable = DICOMLib.DICOMLoadable() rwvLoadable.files.append(file) rwvLoadable.patientName = self.__getSeriesInformation(rwvLoadable.files, self.tags['patientName']) rwvLoadable.patientID = self.__getSeriesInformation(rwvLoadable.files, self.tags['patientID']) rwvLoadable.studyDate = self.__getSeriesInformation(rwvLoadable.files, self.tags['studyDate']) if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): dicomFile = pydicom.dcmread(file) else: dicomFile = dicom.read_file(file) rwvmSeq = dicomFile.ReferencedImageRealWorldValueMappingSequence[0].RealWorldValueMappingSequence unitsSeq = rwvmSeq[0].MeasurementUnitsCodeSequence rwvLoadable.name = rwvLoadable.patientName + ' ' + self.convertStudyDate(rwvLoadable.studyDate) + ' ' + unitsSeq[0].CodeMeaning rwvLoadable.unitName = unitsSeq[0].CodeMeaning (quantity,units) = self.getQuantityAndUnitsFromDICOM(dicomFile) rwvLoadable.quantity = quantity rwvLoadable.units = units rwvLoadable.tooltip = rwvLoadable.name rwvLoadable.selected = True rwvLoadable.confidence = 0.90 return [rwvLoadable] def getLoadablePetSeriesFromRWVMFile(self, file): """ Returns DICOMLoadable instances associated with an RWVM object.""" newLoadables = [] if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): dicomFile = pydicom.dcmread(file) else: dicomFile = dicom.read_file(file) if dicomFile.Modality == "RWV": refRWVMSeq = dicomFile.ReferencedImageRealWorldValueMappingSequence refSeriesSeq = dicomFile.ReferencedSeriesSequence if refRWVMSeq: # May have more than one RWVM value, create loadables for each for item in refRWVMSeq: rwvLoadable = DICOMLib.DICOMLoadable() # Get the referenced files from the database refImageSeq = 
item.ReferencedImageSequence instanceFiles = [] for instance in refImageSeq: uid = instance.ReferencedSOPInstanceUID if uid: instanceFiles += [slicer.dicomDatabase.fileForInstance(uid)] # Get the Real World Values rwvLoadable.files = instanceFiles rwvLoadable.rwvFile = file rwvLoadable.patientName = self.__getSeriesInformation(rwvLoadable.files, self.tags['patientName']) rwvLoadable.patientID = self.__getSeriesInformation(rwvLoadable.files, self.tags['patientID']) rwvLoadable.studyDate = self.__getSeriesInformation(rwvLoadable.files, self.tags['studyDate']) rwvmSeq = item.RealWorldValueMappingSequence unitsSeq = rwvmSeq[0].MeasurementUnitsCodeSequence rwvLoadable.name = rwvLoadable.patientName + ' ' + self.convertStudyDate(rwvLoadable.studyDate) + ' ' + unitsSeq[0].CodeMeaning rwvLoadable.tooltip = rwvLoadable.name (quantity,units) = self.getQuantityAndUnitsFromDICOM(dicomFile) rwvLoadable.quantity = quantity rwvLoadable.units = units rwvLoadable.confidence = 0.90 rwvLoadable.selected = True # added by CB rwvLoadable.slope = rwvmSeq[0].RealWorldValueSlope rwvLoadable.referencedSeriesInstanceUID = refSeriesSeq[0].SeriesInstanceUID # determine modality of referenced series refSeriesFiles = slicer.dicomDatabase.filesForSeries(refSeriesSeq[0].SeriesInstanceUID) if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion == 4 and slicer.app.minorVersion >= 11): refSeriesFile0 = pydicom.dcmread(refSeriesFiles[0]) else: refSeriesFile0 = dicom.read_file(refSeriesFiles[0]) rwvLoadable.referencedModality = refSeriesFile0.Modality # add radiopharmaceutical info if PET if rwvLoadable.referencedModality == 'PT': print('Found Referenced PET series') ris = refSeriesFile0.RadiopharmaceuticalInformationSequence[0] try: # TODO Many DICOM series do not have radiopharmaceutical code sequence! 
rcs = ris.RadiopharmaceuticalCodeSequence if len(rcs) > 0: rwvLoadable.RadiopharmaceuticalCodeValue = rcs[0].CodeValue except AttributeError: print('WARNING: series does not have radiopharmaceutical code sequence.') try: rcs = ris.RadionuclideCodeSequence if len(rcs) > 0: rwvLoadable.RadionuclideCodeValue = rcs[0].CodeValue except AttributeError: print('WARNING: Cannot find radionuclide info for PET Series.') self.sortLoadableSeriesFiles(rwvLoadable) newLoadables.append(rwvLoadable) return newLoadables def getQuantityAndUnitsFromDICOM(self, dicomObject): try: units = slicer.vtkCodedEntry() quantity = slicer.vtkCodedEntry() rwvmSeq = dicomObject.ReferencedImageRealWorldValueMappingSequence[0].RealWorldValueMappingSequence unitsSeq = rwvmSeq[0].MeasurementUnitsCodeSequence units.SetValueSchemeMeaning(unitsSeq[0].CodeValue, unitsSeq[0].CodingSchemeDesignator, unitsSeq[0].CodeMeaning) quantitySeq = rwvmSeq[0][0x0040,0x9220] for qsItem in quantitySeq: if qsItem.ConceptNameCodeSequence[0].CodeMeaning == "Quantity": concept = qsItem.ConceptCodeSequence[0] quantity.SetValueSchemeMeaning(concept.CodeValue, concept.CodingSchemeDesignator, concept.CodeMeaning) return (quantity,units) except: return (None,None) def convertStudyDate(self, studyDate): """Return a readable study date string """ if len(studyDate)==8: studyDate = studyDate[:4] + '-' + studyDate[4:6] + '-' + studyDate[6:] return studyDate def sortLoadableSeriesFiles(self, loadable): scalarVolumePlugin = slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']() svLoadables = scalarVolumePlugin.examine([loadable.files]) if not len(svLoadables): print('Error: failed to parse PET volume!') return else: loadable.files = svLoadables[0].files return def load(self,loadable): loadablePetSeries = self.getLoadablePetSeriesFromRWVMFile( loadable.files[0] ) return self.loadPetSeries(loadablePetSeries[0]) def loadPetSeries(self, loadable): """Use the conversion factor to load the volume into Slicer""" conversionFactor = 
loadable.slope # Create volume node imageNode = self.scalarVolumePlugin.loadFilesWithArchetype(loadable.files, loadable.name) if imageNode: # apply the conversion factor multiplier = vtk.vtkImageMathematics() multiplier.SetOperationToMultiplyByK() multiplier.SetConstantK(float(conversionFactor)) multiplier.SetInput1Data(imageNode.GetImageData()) multiplier.Update() imageNode.GetImageData().DeepCopy(multiplier.GetOutput()) # create list of DICOM instance UIDs corresponding to the loaded files instanceUIDs = "" for dicomFile in loadable.files: uid = slicer.dicomDatabase.fileValue(dicomFile,self.tags['sopInstanceUID']) if uid == "": uid = "Unknown" instanceUIDs += uid + " " instanceUIDs = instanceUIDs[:-1] # strip last space # get the instance UID for the RWVM object derivedItemUID = "" try: derivedItemUID = slicer.dicomDatabase.fileValue(loadable.rwvFile,self.tags['sopInstanceUID']) except AttributeError: # no derived items pass if loadable.quantity: imageNode.SetVoxelValueQuantity(loadable.quantity) if loadable.units: imageNode.SetVoxelValueUnits(loadable.units) # Keep references to the PET instances, as these may be needed to # establish correspondence between slice annotations and acutal slices, # but also keep the RWVM instance UID ... 
it's confusing, but not sure # if there is a better way in Slicer for now imageNode.SetAttribute("DICOM.instanceUIDs", instanceUIDs) imageNode.SetAttribute("DICOM.RWV.instanceUID", derivedItemUID) # automatically select the volume to display volumeLogic = slicer.modules.volumes.logic() appLogic = slicer.app.applicationLogic() selNode = appLogic.GetSelectionNode() selNode.SetReferenceActiveVolumeID(imageNode.GetID()) appLogic.PropagateVolumeSelection() # Change display displayNode = imageNode.GetVolumeDisplayNode() displayNode.SetInterpolate(0) if loadable.referencedModality == 'PT': radiopharmaceuticalCode = '' try: radiopharmaceuticalCode = loadable.RadiopharmaceuticalCodeValue imageNode.SetAttribute('DICOM.RadiopharmaceuticalCodeValue',radiopharmaceuticalCode) print('Found Radiopharmaceutical Code ' + radiopharmaceuticalCode) except AttributeError: imageNode.SetAttribute('DICOM.RadiopharmaceuticalCodeValue','unknown') # use radionuclide info instead radionuclideCode = '' try: radionuclideCode = loadable.RadionuclideCodeValue imageNode.SetAttribute('DICOM.RadionuclideCodeValue',radionuclideCode) print('Found Radionuclide Code ' + radionuclideCode) except AttributeError: imageNode.SetAttribute('DICOM.RadionuclideCodeValue','unknown') if radiopharmaceuticalCode == 'C-B1031': # FDG displayNode.AutoWindowLevelOff() displayNode.SetWindowLevel(6,3) displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeInvertedGrey') elif radiopharmaceuticalCode == 'C-B1036': # FLT displayNode.AutoWindowLevelOff() displayNode.SetWindowLevel(4,2) displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeInvertedGrey') else: # Default W/L if no info about radiopharmaceutical can be found, often FDG displayNode.AutoWindowLevelOff() displayNode.SetWindowLevel(6,3) displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeInvertedGrey') else: displayNode.SetAutoWindowLevel(1) # Change name name = (loadable.name).replace(' ','_') imageNode.SetName(name) # create Subject Hierarchy nodes 
for the loaded series self.addSeriesInSubjectHierarchy(loadable,imageNode) return imageNode # # DICOMRWVMPlugin # class DICOMRWVMPlugin: """ This class is the 'hook' for slicer to detect and recognize the plugin as a loadable scripted module """ def __init__(self, parent): parent.title = "DICOM Real World Value Mapping Plugin" parent.categories = ["Developer Tools.DICOM Plugins"] parent.contributors = ["Ethan Ulrich (Univ. of Iowa), Andrey Fedorov (BWH)"] parent.helpText = """ Plugin to the DICOM Module to parse and load DICOM series associated with Real World Value Mapping objects. Provides options for standardized uptake values. No module interface here, only in the DICOM module """ parent.acknowledgementText = """ This DICOM Plugin was developed by Ethan Ulrich, Univ. of Iowa and was partially funded by NIH grant U24 CA180918. """ # don't show this module - it only appears in the DICOM module parent.hidden = True # Add this extension to the DICOM module's list for discovery when the module # is created. Since this module may be discovered before DICOM itself, # create the list if it doesn't already exist. try: slicer.modules.dicomPlugins except AttributeError: slicer.modules.dicomPlugins = {} slicer.modules.dicomPlugins['DICOMRWVMPlugin'] = DICOMRWVMPluginClass # # DICOMPETSUVWidget # class DICOMPETSUVWidget: def __init__(self, parent = None): self.parent = parent def setup(self): # don't display anything for this widget - it will be hidden anyway pass def enter(self): pass def exit(self): pass
Python
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
sphere_image_creation_task_2.m
.m
3,314
105
close all; clear all; clc; %%%this code block is used to create the images %%%%% %%%later part of this code block is used to save images %%%%%%% %%theta=(2*pi)/180; %in radian %%r=2; %%Xus=10; angle=-(pi/10):theta:(pi/10); %%outf this angle no images are taken [alpha,beta]=size(angle); %num_pixel=Xus/0.1; im=zeros(Xus/0.1,Xus/0.1,beta); for alpha=1:beta notice=waitbar(0,'loading visualization angles, please wait...'); % Define the input grid (in 3D) %%that sutta code figure; xlabel('X');ylabel('Y');zlabel('Z'); [x3, y3, z3] = meshgrid(linspace(-10,10)); % Compute the implicitly defined function x^2 + (y-6)^2 + z^3 - 2^2 = 0 f1 = x3.^2 + (y3-6).^2 + z3.^2 - 2^2; %%why is this taking an elipse % This surface plane, which can also be expressed as f2 = z3-tan(angle(alpha))*y3 - 0*x3.^3 ; % Also compute plane in the 'traditional' way. [x2, y2] = meshgrid(linspace(-10,10)); z2 = tan(angle(alpha))*y2 - 0*x2; % Visualize the two surfaces. patch(isosurface(x3, y3, z3, f1, 0), 'FaceColor', [0.5 1.0 0.5], 'EdgeColor', 'none'); patch(isosurface(x3, y3, z3, f2, 0), 'FaceColor', [1.0 0.0 0.0], 'EdgeColor', 'none'); view(3); camlight; axis vis3d; set(gca,'xlim',[-5 5 ], 'ylim',[1 11], 'zlim',[-5 5]); %title('visualization at',angle(alpha),'radians'); % Find the difference field. f3 = f1 - f2; % Interpolate the difference field on the explicitly defined surface. f3s = interp3(x3, y3, z3, f3, x2, y2, z2); % Find the contour where the difference (on the surface) is zero. C = contours(x2, y2, f3s, [0 0]); % Extract the x- and y-locations from the contour matrix C. xL = C(1, 2:end); yL = C(2, 2:end); % Interpolate on the first surface to find z-locations for the intersection % line. if ~isempty(xL) & ~isempty(yL) zL = interp2(x2, y2, z2, xL, yL); % Visualize the line. 
line(xL,yL,zL,'Color','k','LineWidth',3); %xL_1=ceil(xL/0.2)+50; %conversion to pixels %yL_1=ceil(yL/0.2)+50; %conversion to pixels %%commented since errors max_xL=max(xL); min_xL=min(xL); max_yL=max(yL); min_yL=min(yL); %%direct substuition into eclipse formula a=(max_xL-min_xL)/2; %average to reduce errors b=(max_yL-min_yL)/2; a=a/0.1; %convertion to pixels b=b/0.1; H=0;Y=0; %center of the eclipse for i=1:num_pixel for j=1:num_pixel if ((i-num_pixel/2)/a)^2+((j-num_pixel/2)/b)^2 <= 1 %no need to worry about the world cordinates for now im(i,j,alpha)=255; end end end end %figure, %imshow(im); close(notice) end %%%%%%image save%%%%%%%% %%% a total of 61 image slices will be saved <as 0 is included as well>%%%%%%%% %%% need to add a total of 21 empty images both at the front and back %%%%%%%%% im_main=zeros(Xus/0.1,Xus/0.1,61); for i=[22:40] im_main(:,:,i)=im(:,:,i-21); end %%run this loop if you wish to save the images %%for i=[1:61] %%imsave(im_main(:,:,i)); %%end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
test_6_voxel_visualization.m
.m
703
26
% This scrypt illustrates the use of VoxelPlotter function to visualize % voxel data stored in a 3d matrix clear all close all clc %Generating sinthetic input gridesize=16; R=8; VoxelMat=zeros(gridesize,gridesize,gridesize); for i=1:gridesize for j=1:gridesize for k=1:gridesize if (i-gridesize/2)^2+(j-gridesize/2)^2+(k-gridesize/2)^2<R^2 VoxelMat(i,j,k)=1; end end end end [vol_handle]=VoxelPlotter(VoxelMat,1); %visual effects (I recommend using the FigureRotator function from MATLAB %Centeral view(3); daspect([1,1,1]); set(gca,'xlim',[0 gridesize], 'ylim',[0 gridesize], 'zlim',[0 gridesize]);
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
test_1.m
.m
1,769
78
%test - dicom image process = close all; clear all ; clc ; m=dicominfo('bmode.dcm'); X=dicomread(im); a_1=rgb2gray(X(29:568,235:790,:,1)); %extraction of first image // the range to extract jst the image a_2=rgb2gray(X(29:568,235:790,:,2)); %extraction of the second image figure, subplot(1,2,1);imshow(X(:,:,:,1)); subplot(1,2,2);imshow(X(:,:,:,2)); impixelinfo; %figure, %i=[1:36]; %montage(X(29:568,235:790,:,i)); %detect feature points imagepoints1=detectSURFFeatures(a_1,'MetricThreshold',1000); imagepoints2=detectSURFFeatures(a_2,'MetricThreshold',1000); %extract feature discryptors feature_1=extractFeatures(a_1,imagepoints1); feature_2=extractFeatures(a_2,imagepoints2); %plot the extracted features figure; subplot(1,2,1); imshow(a_1); hold on; plot(selectStrongest(imagepoints1,500)); subplot(1,2,2); imshow(a_2); hold on; plot(selectStrongest(imagepoints2,500)); title('extracted features on the image'); %MATCH EXTRACTED FEATURES %indexPairs_1=matchFeatures(feature_1,feature_2,'maxRatio',0.9); %ommitted as too many outliers are produced indexPairs_1=matchFeatures(feature_1,feature_2,'Unique',true); matchedPoints1 = imagepoints1(indexPairs_1(:, 1)); matchedPoints2 = imagepoints2(indexPairs_1(:, 2)); figure; showMatchedFeatures(a_1,a_2,matchedPoints1,matchedPoints2); title('features matched in the original image'); %outliers delete c=[]; for z=1:422 if sumsqr(matchedPoints2(z).Location-matchedPoints1(z).Location)>10 c=[c,z]; end end matchedPoints1(c)=[]; matchedPoints2(c)=[]; figure; showMatchedFeatures(a_1,a_2,matchedPoints1,matchedPoints2); title('features matched after deletion of outliers'); %create point cloud
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
createmainimage.m
.m
3,485
108
function [ im_main ] = createmainimage(Xus,r,theta,num_pixel) %%%this code block is used to create the images %%%%% %%%later part of this code block is used to save images %%%%%%% %%theta=(2*pi)/180; %in radian %%r=2; %%Xus=10; angle=-(pi/10):theta:(pi/10); %%outf this angle no images are taken [alpha,beta]=size(angle); %num_pixel=Xus/0.1; im=zeros(Xus/0.1,Xus/0.1,beta); for alpha=1:beta notice=waitbar(0,'loading visualization at an angle, please wait...'); % Define the input grid (in 3D) %%that sutta code figure; [x3, y3, z3] = meshgrid(linspace(-10,10)); % Compute the implicitly defined function x^2 + (y-6)^2 + z^3 - 2^2 = 0 f1 = x3.^2 + (y3-6).^2 + z3.^2 - 2^2; %%why is this taking an elipse % This surface plane, which can also be expressed as f2 = z3-tan(angle(alpha))*y3 - 0*x3.^3 ; % Also compute plane in the 'traditional' way. [x2, y2] = meshgrid(linspace(-10,10)); z2 = tan(angle(alpha))*y2 - 0*x2; % Visualize the two surfaces. patch(isosurface(x3, y3, z3, f1, 0), 'FaceColor', [0.5 1.0 0.5], 'EdgeColor', 'none'); patch(isosurface(x3, y3, z3, f2, 0), 'FaceColor', [1.0 0.0 0.0], 'EdgeColor', 'none'); view(3); camlight; axis vis3d; set(gca,'xlim',[-5 5 ], 'ylim',[1 11], 'zlim',[-5 5]); xlabel('X');ylabel('Y');zlabel('Z'); %title('visualization at',angle(alpha),'radians'); % Find the difference field. f3 = f1 - f2; % Interpolate the difference field on the explicitly defined surface. f3s = interp3(x3, y3, z3, f3, x2, y2, z2); % Find the contour where the difference (on the surface) is zero. C = contours(x2, y2, f3s, [0 0]); % Extract the x- and y-locations from the contour matrix C. xL = C(1, 2:end); yL = C(2, 2:end); % Interpolate on the first surface to find z-locations for the intersection % line. if ~isempty(xL) & ~isempty(yL) zL = interp2(x2, y2, z2, xL, yL); % Visualize the line. 
line(xL,yL,zL,'Color','k','LineWidth',3); %xL_1=ceil(xL/0.2)+50; %conversion to pixels %yL_1=ceil(yL/0.2)+50; %conversion to pixels %%commented since errors max_xL=max(xL); min_xL=min(xL); max_yL=max(yL); min_yL=min(yL); %%direct substuition into eclipse formula a=(max_xL-min_xL)/2; %average to reduce errors b=(max_yL-min_yL)/2; a=a/0.1; %convertion to pixels b=b/0.1; H=0;Y=0; %center of the eclipse for i=1:num_pixel for j=1:num_pixel if ((i-num_pixel/2)/a)^2+((j-num_pixel/2)/b)^2 <= 1 %no need to worry about the world cordinates for now im(i,j,alpha)=255; end end end end %figure, %imshow(im); close(notice) end %%%%%%image save%%%%%%%% %%% a total of 61 image slices will be saved <as 0 is included as well>%%%%%%%% %%% need to add a total of 21 empty images both at the front and back %%%%%%%%% im_main=zeros(Xus/0.1,Xus/0.1,61); for i=[22:40] im_main(:,:,i)=im(:,:,i-21); end %%run this loop if you wish to save the images %%image_index=1; %%filename = 'p%d.bmp'; %%filename = sprintf(filename,image_index); %%image_index = image_index + 1; %%imwrite(im,filename); im_main=uint8(im_main); end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
test_3_creating_a_bmode_image.m
.m
4,975
125
function [imageOut, rfEnvelope] = fcnPseudoBmodeUltrasoundSimulator(echoModel,f0,c,sigma_x,sigma_y,speckleVariance) % fcnPseudoBmodeUltrasoundSimulator generates a simulated Pseudo B-Mode % Ultrasound image given the echogenicity model for the structure to be % imaged. % % OUTPUTIMAGE = fcnPseudoBmodeUltrasoundSimulator(ECHO_MODEL) generates % the simulated B-Mode Ultrasound image using default parameter settings. % The size of the simulated image is same as size of the echogenity % matrix provided. The OUTPUTIMAGE is of type uint8. Supported classes % for ECHO_MODEL are uint8, uint16, single, double. % % OUTPUTIMAGE = fcnPseudoBmodeUltrasoundSimulator(ECHO_MODEL, F_0, C, % SIGMA_X, SIGMA_Y, SPECKLE_VARIANCE) generates the simulated B-Mode % Ultrasound image using the specified parameters based on the ECHO_MODEL % provided. % % F_0 - Center frequency of the ultrasonic wave. Default valus is 10e6 % C - Velocity of sound in media (m/s). Default valus is 1540 % SIGMA_X - Pulse-width of transmitting ultrasonic wave. Default % value is 2 % SIGMA_Y - Beam-width) of transmitting ultrasonic wave. Default % value is 1.5 % SPECKLE_VARIANCE - Variance of Speckle distribution of the media. % Default value is 0.01 % % [OUTPUTIMAGE, RF_ENVELOPE] = fcnBPDFHE(...) returns also the % rf Envelope matrix for further usage. % % Details of the method are available in % % Yongjian Yu, Acton, S.T., "Speckle reducing anisotropic diffusion," % IEEE Trans. Image Processing, vol. 11, no. 11, pp. 1260-1270, Nov 2002. % [http://dx.doi.org/10.1109/TIP.2002.804276] % % J. C. Bambre and R. J. Dickinson, "Ultrasonic B-scanning: A computer % simulation", Phys. Med. Biol., vol. 25, no. 3, pp. 463479, 1980. 
% [http://dx.doi.org/10.1088/0031-9155/25/3/006] % % 2011 (c) Debdoot Sheet, Indian Institute of Technology Kharagpur, India % Ver 1.0 27 October 2011 % % Example % ------- % echoModel = imread('phantom.bmp'); % outputImage = fcnPseudoBmodeUltrasoundSimulator(echoModel); % figure, subplot 121, imshow(echoModel,[]), subplot 122, % imshow(outputImage); % % 2011 (c) Debdoot Sheet, Indian Institute of Technology Kharagpur, India % All rights reserved. % % Permission is hereby granted, without written agreement and without % license or royalty fees, to use, copy, modify, and distribute this code % (the source files) and its documentation for any purpose, provided that % the copyright notice in its entirety appear in all copies of this code, % and the original source of this code. Further Indian Institute of % Technology Kharagpur (IIT Kharagpur / IITKGP) is acknowledged in any % publication that reports research or any usage using this code. % % In no circumstantial cases or events the Indian Institute of Technology % Kharagpur or the author(s) of this particular disclosure be liable to any % party for direct, indirectm special, incidental, or consequential % damages if any arising out of due usage. Indian Institute of Technology % Kharagpur and the author(s) disclaim any warranty, including but not % limited to the implied warranties of merchantability and fitness for a % particular purpose. The disclosure is provided hereunder "as in" % voluntarily for community development and the contributing parties have % no obligation to provide maintenance, support, updates, enhancements, % or modification. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Input argument support check iptcheckinput(echoModel,{'uint8','uint16','single','double'}, {'nonsparse','2d'}, mfilename,'I',1); if nargin == 1 f0 = 10e6; c = 1540; sigma_x = 2; sigma_y = 1.5; speckleVariance = 0.01; elseif nargin == 6 if f0 <= 0 error('Center frequency (f0) should be non-zero positive'); elseif c<=0 error('Velocity of sound (c) in media should be non-zero positive'); elseif sigma_x<=0 error('Pulse-width (sigma_x) of transmitting ultrasonic wave should be non-zero positive'); elseif sigma_y<=0 error('Beam-width (sigma_y) of transmitting ultrasonic wave should be non-zero positive'); elseif speckleVariance<=0 error('Variance of Speckle distribution (speckleVariance) of the media should be non-zero positive'); end else error('Unsupported calling of fcnPseudoBmodeUltrasoundSimulator'); end k0 = 2*pi*f0/c; [nRows nCols] = size(echoModel); echoModel = mat2gray(echoModel); G = rand([nRows nCols],'double'); G = (G-mean(G(:)))*speckleVariance; T = double(echoModel).*G; x = -10*sigma_x:10*sigma_x; y = -10*sigma_y:10*sigma_y; hx = (sin(k0*x).*exp(-(x.^2)/(2*sigma_x^2)))'; hy = exp(-(y.^2)/(2*sigma_y^2)); V = imfilter(imfilter(T,hx),hy); Vcap = hilbert(V); Va = V + (1i*Vcap); rfEnvelope = abs(Va); imageOut = im2uint8(mat2gray(log10(rfEnvelope)));
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
fcnPseudoBmodeUltrasoundSimulator.m
.m
4,999
128
function [imageOut, rfEnvelope] = fcnPseudoBmodeUltrasoundSimulator(echoModel,f0,c,sigma_x,sigma_y,speckleVariance) % fcnPseudoBmodeUltrasoundSimulator generates a simulated Pseudo B-Mode % Ultrasound image given the echogenicity model for the structure to be % imaged. % % OUTPUTIMAGE = fcnPseudoBmodeUltrasoundSimulator(ECHO_MODEL) generates % the simulated B-Mode Ultrasound image using default parameter settings. % The size of the simulated image is same as size of the echogenity % matrix provided. The OUTPUTIMAGE is of type uint8. Supported classes % for ECHO_MODEL are uint8, uint16, single, double. % % OUTPUTIMAGE = fcnPseudoBmodeUltrasoundSimulator(ECHO_MODEL, F_0, C, % SIGMA_X, SIGMA_Y, SPECKLE_VARIANCE) generates the simulated B-Mode % Ultrasound image using the specified parameters based on the ECHO_MODEL % provided. % % F_0 - Center frequency of the ultrasonic wave. Default valus is 10e6 % C - Velocity of sound in media (m/s). Default valus is 1540 % SIGMA_X - Pulse-width of transmitting ultrasonic wave. Default % value is 2 % SIGMA_Y - Beam-width) of transmitting ultrasonic wave. Default % value is 1.5 % SPECKLE_VARIANCE - Variance of Speckle distribution of the media. % Default value is 0.01 % % [OUTPUTIMAGE, RF_ENVELOPE] = fcnBPDFHE(...) returns also the % rf Envelope matrix for further usage. % % Details of the method are available in % % Yongjian Yu, Acton, S.T., "Speckle reducing anisotropic diffusion," % IEEE Trans. Image Processing, vol. 11, no. 11, pp. 1260-1270, Nov 2002. % [http://dx.doi.org/10.1109/TIP.2002.804276] % % J. C. Bambre and R. J. Dickinson, "Ultrasonic B-scanning: A computer % simulation", Phys. Med. Biol., vol. 25, no. 3, pp. 463479, 1980. 
% [http://dx.doi.org/10.1088/0031-9155/25/3/006] % % 2011 (c) Debdoot Sheet, Indian Institute of Technology Kharagpur, India % Ver 1.0 27 October 2011 % % Example % ------- % echoModel = imread('phantom.bmp'); % outputImage = fcnPseudoBmodeUltrasoundSimulator(echoModel); % figure, subplot 121, imshow(echoModel,[]), subplot 122, % imshow(outputImage); % % 2011 (c) Debdoot Sheet, Indian Institute of Technology Kharagpur, India % All rights reserved. % % Permission is hereby granted, without written agreement and without % license or royalty fees, to use, copy, modify, and distribute this code % (the source files) and its documentation for any purpose, provided that % the copyright notice in its entirety appear in all copies of this code, % and the original source of this code. Further Indian Institute of % Technology Kharagpur (IIT Kharagpur / IITKGP) is acknowledged in any % publication that reports research or any usage using this code. % % In no circumstantial cases or events the Indian Institute of Technology % Kharagpur or the author(s) of this particular disclosure be liable to any % party for direct, indirectm special, incidental, or consequential % damages if any arising out of due usage. Indian Institute of Technology % Kharagpur and the author(s) disclaim any warranty, including but not % limited to the implied warranties of merchantability and fitness for a % particular purpose. The disclosure is provided hereunder "as in" % voluntarily for community development and the contributing parties have % no obligation to provide maintenance, support, updates, enhancements, % or modification. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Input argument support check iptcheckinput(echoModel,{'uint8','uint16','single','double'}, {'nonsparse','2d'}, mfilename,'I',1); if nargin == 1 f0 = 10e6; c = 1540; sigma_x = 2; sigma_y = 1.5; speckleVariance = 0.01; elseif nargin == 6 if f0 <= 0 error('Center frequency (f0) should be non-zero positive'); elseif c<=0 error('Velocity of sound (c) in media should be non-zero positive'); elseif sigma_x<=0 error('Pulse-width (sigma_x) of transmitting ultrasonic wave should be non-zero positive'); elseif sigma_y<=0 error('Beam-width (sigma_y) of transmitting ultrasonic wave should be non-zero positive'); elseif speckleVariance<=0 error('Variance of Speckle distribution (speckleVariance) of the media should be non-zero positive'); end else error('Unsupported calling of fcnPseudoBmodeUltrasoundSimulator'); end k0 = 2*pi*f0/c; [nRows nCols] = size(echoModel); echoModel = mat2gray(echoModel); G = rand([nRows nCols],'double'); G = (G-mean(G(:)))*speckleVariance; T = double(echoModel).*G; x = -10*sigma_x:10*sigma_x; %-20 :20 y = -10*sigma_y:10*sigma_y; %-15 :15 hx = (sin(k0*x).*exp(-(x.^2)/(2*sigma_x^2)))'; hy = exp(-(y.^2)/(2*sigma_y^2)); V = imfilter(imfilter(T,hx),hy); Vcap = hilbert(V); Va = V + (1i*Vcap); rfEnvelope = abs(Va); imageOut = im2uint8(mat2gray(log10(rfEnvelope)));
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
theonewith_voxels.m
.m
940
45
clear all; close all; clc; gridesize=11; % numbers are arbitrary cube=zeros(11,11,11); cube(3:9,3:9,3:9)=5; % Create a cube inside the region % Boring: faces of the cube are a different color. cube(3:9,3:9,3)=1; cube(3:9,3:9,9)=1; cube(3:9,3,3:9)=1; cube(3:9,9,3:9)=1; cube(3,3:9,3:9)=2; cube(9,3:9,3:9)=1; H=vol3d('Cdata',cube,'alpha',cube/5) %what is alpha? figure; [vol_handle]=VoxelPlotter(cube,0.5); %size of each voxel view(3); daspect([1,1,1]); set(gca,'xlim',[0 gridesize], 'ylim',[0 gridesize], 'zlim',[0 gridesize]); xlabel('X');ylabel('Y');zlabel('Z'); %figure; %plot3(cube(:,:,1),cube(:,1,:)); %connects surfaces with specified value figure; x = 1:11; y = 1:11; z = 1:11; v = cube; alpha=isosurface(x,y,z,v,1); p = patch(isosurface(x,y,z,v,5)); isonormals(x,y,z,v,p) p.FaceColor = 'red'; p.EdgeColor = 'none'; daspect([1,1,1]) view(3); axis tight camlight lighting gouraud
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
test_5.m
.m
165
7
% test_5.m -- demonstration: run the pseudo B-mode ultrasound simulator
% on the example scan-line data set and display the simulated image.
load example_us_bmode_scan_lines.mat              % provides 'scan_lines'
echoModel = scan_lines;                           % echo model fed to the simulator
[imageOut, rfEnvelope] = fcnPseudoBmodeUltrasoundSimulator(echoModel);
figure;
imshow(imageOut);                                 % show the simulated B-mode image
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
FindExternalVoxels.m
.m
5,098
125
function [FV]=FindExternalVoxels(VoxelMat,Vox_Size)
% FindExternalVoxels scans VoxelMat (a 3D matrix) and finds which voxels
% are external by checking whether they have neighbors on all 6 sides
% (TOP, BOTTOM, FRONT, BACK, LEFT, RIGHT). For each external voxel it then
% determines which individual faces are exposed by calling the local
% FindExternalFaces function, accumulating an FV struct suitable for
% visualization with patch().
%
% Inputs:
%   VoxelMat - 3D array; entries > 100 count as occupied voxels
%              (threshold below 255 to tolerate bilinear-interpolated values)
%   Vox_Size - level-of-detail exponent; voxel edge length is 2^(Vox_Size-1)
% Output:
%   FV       - struct with .vertices (Nx3 points) and .faces (Mx4 quads)

% Preallocate for the worst case (every voxel external: 8 vertices and
% 6 quad faces per voxel); unused all-zero rows are stripped at the end.
FV.vertices=zeros(8*size(VoxelMat,1)*size(VoxelMat,2)*size(VoxelMat,3),3);
FV.faces=zeros(6*size(VoxelMat,1)*size(VoxelMat,2)*size(VoxelMat,3),4);

% Running write positions into the preallocated arrays.
FaceIndex=1;
VertexIndex=0;
counter=1;
ExternalIndexes=zeros(size(VoxelMat,1)*size(VoxelMat,2)*size(VoxelMat,3),3);
voxel_size=2^(Vox_Size-1)*[1 1 1];
h=waitbar(0,'loading voxels, please wait...');
for i=1:size(VoxelMat,1)
    for j=1:size(VoxelMat,2)
        for k=1:size(VoxelMat,3)
            if VoxelMat(i,j,k)>100 % value can be 255, but thresholded for the case of bilinear interpolation
                if i==1 || j==1 || k==1 || i==size(VoxelMat,1) || j== size(VoxelMat,2) || k== size(VoxelMat,3)
                    % Voxel lies on the bounding box: always external.
                    ExternalIndexes(counter,1:3)=[i j k] ;
                    [FV,FaceIndex,VertexIndex]=FindExternalFaces(VoxelMat,ExternalIndexes,voxel_size,counter,FaceIndex,VertexIndex,Vox_Size,FV);
                    counter=counter+1;
                else
                    % Interior voxel: external only if at least one of its
                    % six face neighbors is empty.
                    if VoxelMat(i+1,j,k)==0 || VoxelMat(i-1,j,k)==0 || VoxelMat(i,j+1,k)==0 || VoxelMat(i,j-1,k)==0 || VoxelMat(i,j,k+1)==0 || VoxelMat(i,j,k-1)==0
                        ExternalIndexes(counter,1:3)=[i j k] ;
                        [FV,FaceIndex,VertexIndex]=FindExternalFaces(VoxelMat,ExternalIndexes,voxel_size,counter,FaceIndex,VertexIndex,Vox_Size,FV);
                        counter=counter+1;
                    end
                end
            end
        end
    end
    waitbar(i/size(VoxelMat,1));
end
counter=counter-1;
% Strip the unused (all-zero) preallocated rows.
FV.vertices=FV.vertices(any(FV.vertices,2),:);
FV.faces=FV.faces(any(FV.faces,2),:);
close(h) ;
end

function [FV,FaceIndex,VertexIndex]=FindExternalFaces(VoxelMat,ExternalIndexes,voxel_size,i,FaceIndex,VertexIndex,LOD,FV)
% Append the 8 cube vertices of external voxel #i to FV and emit a quad
% for each of its faces that has no occupied neighbor (or lies on the
% bounding box of the volume).
% Vertex numbering convention used by the 'faces' table below:
%   front face - 1,2,3,4 ; back face - 5,6,7,8
faces=[1 2 3 4; 2 6 7 3 ; 6 5 8 7; 5 1 4 8; 4 3 7 8 ; 1 2 6 5];

% The 8 corner vertices of this voxel, offset by its grid position scaled
% by the level of detail (rows: x, y, z coordinates, transposed to Nx3).
FV.vertices(VertexIndex+1:VertexIndex+8,:)=[2^(LOD-1)*(ExternalIndexes(i,1)-1)+0.5+[0 voxel_size(1) voxel_size(1) 0 0 voxel_size(1) voxel_size(1) 0]; ...
    2^(LOD-1)*(ExternalIndexes(i,2)-1)+0.5+[0 0 0 0 voxel_size(2) voxel_size(2) voxel_size(2) voxel_size(2)]; ...
    2^(LOD-1)*(ExternalIndexes(i,3)-1)+0.5+[0 0 voxel_size(3) voxel_size(3) 0 0 voxel_size(3) voxel_size(3)]]';

if ExternalIndexes(i,2)~=1
    if VoxelMat(ExternalIndexes(i,1),ExternalIndexes(i,2)-1,ExternalIndexes(i,3))==0 % if neighbor value is zero
        % No Front neighbor
        FV.faces(FaceIndex,:)=faces(1,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Front
    FV.faces(FaceIndex,:)=faces(1,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

if ExternalIndexes(i,1)~=size(VoxelMat,1)
    if VoxelMat(ExternalIndexes(i,1)+1,ExternalIndexes(i,2),ExternalIndexes(i,3))==0
        % No Right neighbor
        FV.faces(FaceIndex,:)=faces(2,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Right
    FV.faces(FaceIndex,:)=faces(2,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

if ExternalIndexes(i,2)~=size(VoxelMat,2)
    if VoxelMat(ExternalIndexes(i,1),ExternalIndexes(i,2)+1,ExternalIndexes(i,3))==0
        % No Back neighbor
        FV.faces(FaceIndex,:)=faces(3,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Back
    FV.faces(FaceIndex,:)=faces(3,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

if ExternalIndexes(i,1)~=1
    if VoxelMat(ExternalIndexes(i,1)-1,ExternalIndexes(i,2),ExternalIndexes(i,3))==0
        % No Left neighbor
        FV.faces(FaceIndex,:)=faces(4,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Left
    FV.faces(FaceIndex,:)=faces(4,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

if ExternalIndexes(i,3)~=size(VoxelMat,3)
    if VoxelMat(ExternalIndexes(i,1),ExternalIndexes(i,2),ExternalIndexes(i,3)+1)==0
        % No Top neighbor
        FV.faces(FaceIndex,:)=faces(5,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Top
    FV.faces(FaceIndex,:)=faces(5,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

if ExternalIndexes(i,3)~=1
    if VoxelMat(ExternalIndexes(i,1),ExternalIndexes(i,2),ExternalIndexes(i,3)-1)==0
        % No Bottom neighbor
        FV.faces(FaceIndex,:)=faces(6,:)+VertexIndex;
        FaceIndex=FaceIndex+1;
    end
else % Bounding Box Bottom
    FV.faces(FaceIndex,:)=faces(6,:)+VertexIndex;
    FaceIndex=FaceIndex+1;
end

% Advance past this voxel's 8 vertices for the next call.
VertexIndex=VertexIndex+8;
end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
bilinear.m
.m
1,069
47
function [im_zoom] = bilinear( array_recon, factor )
%BILINEAR  Bilinear interpolation (zoom) of an image.
%   im_zoom = BILINEAR(array_recon) returns the image unchanged
%   (zoom factor 1 -- the original hard-coded behavior).
%   im_zoom = BILINEAR(array_recon, factor) resizes the image by the
%   given zoom factor using bilinear interpolation.
%
%   Inputs:
%     array_recon - m-by-n(-by-d) numeric image array
%     factor      - zoom factor (optional, default 1)
%   Output:
%     im_zoom     - (factor*m)-by-(factor*n)(-by-d) interpolated image
%
% Changes vs. the original:
%   * 'factor' generalized from a hard-coded constant to an optional
%     parameter (default preserves old behavior).
%   * BUG FIX: the interpolation weights were mispaired -- the y-remainder
%     was applied across the x-axis corners and (1-xrem) weighted the x2
%     row. Unreachable at factor 1, but wrong for any fractional position.
%   * im_zoom is preallocated and the color dimension is preserved.

if nargin < 2
    factor = 1;                  % default keeps the original behavior
end

a = array_recon;
[m, n, d] = size(a);
rows = factor * m;
columns = factor * n;
im_zoom = zeros(rows, columns, d);   % preallocate (original grew per element)

for i = 1:rows
    x = i / factor;              % source row coordinate
    x1 = floor(x);               % bilinear in X direction
    x2 = ceil(x);
    if x1 == 0, x1 = 1; end
    xrem = rem(x, 1);            % fractional distance from x1 toward x2
    for j = 1:columns
        y = j / factor;          % source column coordinate
        y1 = floor(y);           % bilinear in Y direction
        y2 = ceil(y);
        if y1 == 0, y1 = 1; end
        yrem = rem(y, 1);
        % Corner samples (all color channels).
        TopLeft     = a(x1, y1, :);
        TopRight    = a(x1, y2, :);
        BottomLeft  = a(x2, y1, :);
        BottomRight = a(x2, y2, :);
        % Interpolate along columns within each row, then between rows.
        R1 = TopRight * yrem + TopLeft * (1 - yrem);        % row x1
        R2 = BottomRight * yrem + BottomLeft * (1 - yrem);  % row x2
        im_zoom(i, j, :) = R2 * xrem + R1 * (1 - xrem);
    end
end
end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
test_2.m
.m
430
22
% test_2.m -- read a multi-frame DICOM ultrasound file and display each frame.
close all; clear all; clc;

im = dicominfo('US-MONO2-8-8x-execho.dcm');
X = dicomread(im);                   % frames along the 4th dimension

a = zeros(120, 128, 8);              % 8 frames of 120x128 pixels
for i = 1:8
    a(:,:,i) = X(:,:,:,i);           % collapse the singleton color dimension
    % BUG FIX: the original displayed a(:,:,1) on every iteration, opening
    % eight figures that all showed frame 1; show the current frame instead.
    figure, imshow(uint8(a(:,:,i)));
end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
VoxelPlotter.m
.m
348
16
function [vol_handle,FV]=VoxelPlotter(VoxelMat,Vox_Size)
%VOXELPLOTTER  Render the external faces of the occupied voxels in VoxelMat.
%   [vol_handle, FV] = VoxelPlotter(VoxelMat, Vox_Size) detects the
%   external voxels/faces via FindExternalVoxels and draws them as a single
%   red patch in the current axes. Vox_Size defaults to 1 when omitted.
%   vol_handle is 0 when there is nothing to draw; FV holds the geometry.

if nargin == 1
    Vox_Size = 1;                    % default voxel size parameter
end
vol_handle = 0;

% Geometry (faces/vertices) of only the exposed voxel faces.
FV = FindExternalVoxels(VoxelMat, Vox_Size);

cla;
if size(FV.vertices, 1) == 0
    cla;                             % nothing external: leave the axes cleared
else
    % Draw every exposed face in one patch object.
    vol_handle = patch(FV, 'FaceColor', 'r');
end
end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
vol3d.m
.m
7,611
256
function [model] = vol3d(varargin)
%H = VOL3D Volume render 3-D data.
% VOL3D uses the orthogonal plane 2-D texture mapping technique for
% volume rending 3-D data in OpenGL. Use the 'texture' option to fine
% tune the texture mapping technique. This function is best used with
% fast OpenGL hardware.
%
% vol3d                   Provide a demo of functionality.
%
% H = vol3d('CData',data) Create volume render object from input
%                         3-D data. Use interp3 on data to increase volume
%                         rendering resolution. Returns a struct
%                         encapsulating the pseudo-volume rendering object.
%                         XxYxZ array represents scaled colormap indices.
%                         XxYxZx3 array represents truecolor RGB values for
%                         each voxel (along the 4th dimension).
%
% vol3d(...,'Alpha',alpha) XxYxZ array of alpha values for each voxel, in
%                         range [0,1]. Default: data (interpreted as
%                         scaled alphamap indices).
%
% vol3d(...,'Parent',axH) Specify parent axes. Default: gca.
%
% vol3d(...,'XData',x)    1x2 x-axis bounds. Default: [0 size(data, 2)].
% vol3d(...,'YData',y)    1x2 y-axis bounds. Default: [0 size(data, 1)].
% vol3d(...,'ZData',z)    1x2 z-axis bounds. Default: [0 size(data, 3)].
%
% vol3d(...,'texture','2D') Only render texture planes parallel to nearest
%                         orthogonal viewing plane. Requires doing
%                         vol3d(h) to refresh if the view is rotated
%                         (i.e. using cameratoolbar).
%
% vol3d(...,'texture','3D') Default. Render x,y,z texture planes
%                         simultaneously. This avoids the need to
%                         refresh the view but requires faster OpenGL
%                         hardware performance.
%
% vol3d(H)                Refresh view. Updates rendering of texture planes
%                         to reduce visual aliasing when using the
%                         'texture'='2D' option.
%
% NOTES
% Use vol3dtool (from the original vol3d FEX submission) for editing the
% colormap and alphamap. Adjusting these maps will allow you to explore
% your 3-D volume data at various intensity levels. See documentation on
% alphamap and colormap for more information.
%
% Use interp3 on input data to increase/decrease resolution of data
%
% Examples:
%
% % Visualizing fluid flow
% v = flow(50);
% h = vol3d('cdata',v,'texture','2D');
% view(3);
% % Update view since 'texture' = '2D'
% vol3d(h);
% alphamap('rampdown'), alphamap('decrease'), alphamap('decrease')
%
% % Visualizing MRI data
% load mri.mat
% D = squeeze(D);
% h = vol3d('cdata',D,'texture','3D');
% view(3);
% axis tight; daspect([1 1 .4])
% alphamap('rampup');
% alphamap(.06 .* alphamap);
%
% See also alphamap, colormap, opengl, isosurface

% Copyright Joe Conti, 2004
% Improvements by Oliver Woodford, 2008-2011, with permission of the
% copyright holder.

% No arguments: run the built-in demo.
if nargin == 0
    demo_vol3d;
    return
end

% First argument may be an existing vol3d model struct (refresh mode);
% otherwise start from the default model and parse name/value pairs.
if isstruct(varargin{1})
    model = varargin{1};
    if length(varargin) > 1
        varargin = {varargin{2:end}};
    end
else
    model = localGetDefaultModel;
end

% Parse case-insensitive name/value option pairs into the model struct.
if length(varargin)>1
    for n = 1:2:length(varargin)
        switch(lower(varargin{n}))
            case 'cdata'
                model.cdata = varargin{n+1};
            case 'parent'
                model.parent = varargin{n+1};
            case 'texture'
                model.texture = varargin{n+1};
            case 'alpha'
                model.alpha = varargin{n+1};
            case 'xdata'
                model.xdata = varargin{n+1}([1 end]);
            case 'ydata'
                model.ydata = varargin{n+1}([1 end]);
            case 'zdata'
                model.zdata = varargin{n+1}([1 end]);
        end
    end
end

if isempty(model.parent)
    model.parent = gca;
end

% (Re)draw the texture planes for the current camera/view.
[model] = local_draw(model);

%------------------------------------------%
function [model] = localGetDefaultModel
% Default (empty) vol3d model struct. The tag is unique per object so a
% redraw can find and delete the surfaces belonging to this model only.

model.cdata = [];
model.alpha = [];
model.xdata = [];
model.ydata = [];
model.zdata = [];
model.parent = [];
model.handles = [];
model.texture = '3D';
tag = tempname;
model.tag = ['vol3d_' tag(end-11:end)];

%------------------------------------------%
function [model,ax] = local_draw(model)
% Render the volume as stacks of semi-transparent textured surfaces, one
% stack per axis (or only the stack facing the camera in '2D' mode).

cdata = model.cdata;
siz = size(cdata);

% Define [x,y,z]data bounds if not supplied.
if isempty(model.xdata)
    model.xdata = [0 siz(2)];
end
if isempty(model.ydata)
    model.ydata = [0 siz(1)];
end
if isempty(model.zdata)
    model.zdata = [0 siz(3)];
end

% Remove surfaces left over from a previous draw of this model.
try
    delete(model.handles);
catch
end

ax = model.parent;
cam_dir = camtarget(ax) - campos(ax);
[m,ind] = max(abs(cam_dir)); % dominant viewing axis (used by '2D' texture mode)

% Shared surface properties; opts{4} (cdatamapping) and opts{6}
% (alphadatamapping) are filled in below.
opts = {'Parent',ax,'cdatamapping',[],'alphadatamapping',[],'facecolor','texturemap','edgealpha',0,'facealpha','texturemap','tag',model.tag};

% Truecolor (XxYxZx3) data maps directly; scalar data maps through the colormap.
if ndims(cdata) > 3
    opts{4} = 'direct';
else
    cdata = double(cdata);
    opts{4} = 'scaled';
end

% Alpha defaults to the data itself (scaled). For truecolor data, use the
% inverted, normalized vector magnitude so darker voxels are more opaque.
if isempty(model.alpha)
    alpha = cdata;
    if ndims(model.cdata) > 3
        alpha = sqrt(sum(double(alpha).^2, 4));
        alpha = alpha - min(alpha(:));
        alpha = 1 - alpha / max(alpha(:));
    end
    opts{6} = 'scaled';
else
    alpha = model.alpha;
    if ~isequal(siz(1:3), size(alpha))
        error('Incorrect size of alphamatte');
    end
    opts{6} = 'none';
end

% Delete any surfaces in these axes still carrying this model's tag.
h = findobj(ax,'type','surface','tag',model.tag);
for n = 1:length(h)
    try
        delete(h(n));
    catch
    end
end

is3DTexture = strcmpi(model.texture,'3D');
handle_ind = 1;

% Create z-slice stack (planes of constant z, stepped along the z axis).
if(ind==3 || is3DTexture )
    x = [model.xdata(1), model.xdata(2); model.xdata(1), model.xdata(2)];
    y = [model.ydata(1), model.ydata(1); model.ydata(2), model.ydata(2)];
    z = [model.zdata(1), model.zdata(1); model.zdata(1), model.zdata(1)];
    diff = model.zdata(2)-model.zdata(1);
    delta = diff/size(cdata,3);
    for n = 1:size(cdata,3)
        cslice = squeeze(cdata(:,:,n,:));
        aslice = double(squeeze(alpha(:,:,n)));
        h(handle_ind) = surface(x,y,z,cslice,'alphadata',aslice,opts{:});
        z = z + delta;
        handle_ind = handle_ind + 1;
    end
end

% Create x-slice stack (planes of constant x).
if (ind==1 || is3DTexture )
    x = [model.xdata(1), model.xdata(1); model.xdata(1), model.xdata(1)];
    y = [model.ydata(1), model.ydata(1); model.ydata(2), model.ydata(2)];
    z = [model.zdata(1), model.zdata(2); model.zdata(1), model.zdata(2)];
    diff = model.xdata(2)-model.xdata(1);
    delta = diff/size(cdata,2);
    for n = 1:size(cdata,2)
        cslice = squeeze(cdata(:,n,:,:));
        aslice = double(squeeze(alpha(:,n,:)));
        h(handle_ind) = surface(x,y,z,cslice,'alphadata',aslice,opts{:});
        x = x + delta;
        handle_ind = handle_ind + 1;
    end
end

% Create y-slice stack (planes of constant y).
if (ind==2 || is3DTexture)
    x = [model.xdata(1), model.xdata(1); model.xdata(2), model.xdata(2)];
    y = [model.ydata(1), model.ydata(1); model.ydata(1), model.ydata(1)];
    z = [model.zdata(1), model.zdata(2); model.zdata(1), model.zdata(2)];
    diff = model.ydata(2)-model.ydata(1);
    delta = diff/size(cdata,1);
    for n = 1:size(cdata,1)
        cslice = squeeze(cdata(n,:,:,:));
        aslice = double(squeeze(alpha(n,:,:)));
        h(handle_ind) = surface(x,y,z,cslice,'alphadata',aslice,opts{:});
        y = y + delta;
        handle_ind = handle_ind + 1;
    end
end

model.handles = h;

function demo_vol3d
% Demo: volume-render the MRI sample data set shipped with MATLAB.
figure;
load mri.mat
vol3d('cdata', squeeze(D), 'xdata', [0 1], 'ydata', [0 1], 'zdata', [0 0.7]);
colormap(bone(256));
alphamap([0 linspace(0.1, 0, 255)]);
axis equal off
set(gcf, 'color', 'w');
view(3);
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
SurfaceIntersection.m
.m
35,459
942
function [intMatrix, intSurface] = SurfaceIntersection(surface1, surface2, varargin) %SURFACEINTERSECTION intersection of 2 surfaces % [intMatrix, intSurface] = SurfaceIntersection(surface1, surface2) % calculates the intersection of surfaces 1 and 2. Code can either return % just the matrix indicating which face of surface1 intersected with face % of surface2, which is calculated using Tomas Moller algorithm, or can % also return the actual line of intersection. In case when parts of the % surface 1 and 2 lay on the same plane the intersection is a 2D area % instead of 1D edge. In such a case the intersection area will be % triangulated and intSurface.edges will hold the edges of the % triangulation surface and intSurface.faces will hold the faces. % % INPUT: % * surface1 & surface2 - two surfaces defined as structs or classes. % Several inputs are possible: % - struct with "faces" and "vertices" fields % - 'triangulation' class (only the boundary surface will be used) % - 'delaunayTriangulation' class % % OUTPUT: % * intMatrix - sparse Matrix with n1 x n2 dimension where n1 and n2 are % number of faces in surfaces % * intSurface - a structure with following fields: % intSurface.vertices - N x 3 array of unique points % intSurface.edges - N x 2 array of edge vertex ID's % intSurface.faces - N x 3 array of face vertex ID's % % ALGORITHM: % Based on Triangle/triangle intersection test routine by Tomas Mller, 1997. 
% See article "A Fast Triangle-Triangle Intersection Test", % Journal of Graphics Tools, 2(2), 1997 % http://web.stanford.edu/class/cs277/resources/papers/Moller1997b.pdf % http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/code/opttritri.txt %% Get FACES and VERTICES inputs if isa(surface1, 'triangulation') [surface1.faces, surface1.vertices] = freeBoundary(surface1); elseif isa(surface1, 'delaunayTriangulation') S = surface1; surface1 = []; surface1.faces = S.ConnectivityList; surface1.vertices = S.Points; clear S end if isa(surface2, 'triangulation') [surface2.faces, surface1.vertices] = freeBoundary(surface2); elseif isa(surface2, 'delaunayTriangulation') S = surface2; surface2 = []; surface2.faces = S.ConnectivityList; surface2.vertices = S.Points; clear S end ok1 = isstruct(surface1) && isfield(surface1, 'vertices') && isfield(surface1, 'faces'); ok2 = isstruct(surface2) && isfield(surface2, 'vertices') && isfield(surface2, 'faces'); assert(ok1, 'Surface #1 must be a struct with "faces" and "vertices" fields' ); assert(ok2, 'Surface #2 must be a struct with "faces" and "vertices" fields' ); %% Flip dimentions if necessery if size(surface1.faces,1)==3 && size(surface1.faces,2)~=3 surface1.faces = surface1.faces'; end if size(surface1.vertices,1)==3 && size(surface1.vertices,2)~=3 surface1.vertices = surface1.vertices'; end if size(surface2.faces,1)==3 && size(surface2.faces,2)~=3 surface2.faces = surface2.faces'; end if size(surface2.vertices,1)==3 && size(surface2.vertices,2)~=3 surface2.vertices = surface2.vertices'; end %% Parse extra parameters getIntersection = (nargout>1); debug = true; PointRoundingTol = 1e6; algorithm = 'moller'; k=1; nVarargs = length(varargin); while (k<=nVarargs) assert(ischar(varargin{k}), 'Incorrect input parameters') switch lower(varargin{k}) case 'debug' debug = varargin{k+1}~=0; k = k+1; case 'algorithm' algorithm = lower(strtrim(varargin{k+1})); k = k+1; case 'pointroundingtol' PointRoundingTol = varargin{k+1}; k = 
k+1; end k = k+1; end %% Initialize variables epsilon = eps; nFace1 = size(surface1.faces,1); nFace2 = size(surface2.faces,1); nVert1 = size(surface1.vertices,1); nVert2 = size(surface2.vertices,1); %% create strip down versions of MATLAB cross and dot function cross_prod = @(a,b) [... a(:,2).*b(:,3)-a(:,3).*b(:,2), ... a(:,3).*b(:,1)-a(:,1).*b(:,3), ... a(:,1).*b(:,2)-a(:,2).*b(:,1)]; dot_prod = @(a,b) a(:,1).*b(:,1)+a(:,2).*b(:,2)+a(:,3).*b(:,3); normalize = @(V) bsxfun(@rdivide,V, sqrt(sum(V.^2,2))); %% Initialize output variables % intersect is a nFace1 x nFace2 matrix. Possible values: -2 (do not know), % -1 (coplanar with unknown overlap), 0 (no intersections), 1 (intersects). % Negative values are internal only. intMatrix = zeros([nFace1,nFace2], 'int8')-2; % -2 indicates that there was no succesful test yet intSurface.vertices = []; intSurface.faces = []; intSurface.edges = []; % ======================================================================= %% === Stage 1 ========================================================== % ======================================================================= % Each triangle is a subset of the plane it lies in, so for two triangles % to intersect they must overlap along the line of intersection of their % planes. Hence, a necessary condition for intersection is that each % triangle must intersect the plane of the other. % Mllers method begins by checking the mutual intersection of each % triangle with the plane of the other. To do so, it determines for each % triangle on which side of the other triangles supporting plane its % vertices lie. Now, if all vertices of one triangle lie on the same side % and no vertex is on the plane, the intersection is rejected. 
%% compute plane equations for each triangle of the surface #1 % plane equation #1: N1.X-d1=0 V1 = surface1.vertices(surface1.faces(:,1),:); V2 = surface1.vertices(surface1.faces(:,2),:); V3 = surface1.vertices(surface1.faces(:,3),:); N1 = cross_prod(V2-V1,V3-V1); % array size nFace1 x 3 N1 = normalize(N1); d1 = dot_prod(N1,V1); % array size nFace1 x 1 %% Distance from surface #2 vertices to planes of surface #1 % Calculate signed distance from all vertices of surface #2 to each plane % of of surface #1 du = zeros(nFace1,nVert2); for iVert2 = 1:nVert2 p = surface2.vertices(iVert2,:); du(:,iVert2) = N1(:,1)*p(1) + N1(:,2)*p(2) + N1(:,3)*p(3) - d1; end if debug assert(all(size(du)==[nFace1,nVert2]), 'Incorrect array dimensions: dv') end du(abs(du)<epsilon)=0; % robustness check % Distances from vertex 1, 2 & 3 of faces of surface #2 to planes of surface #1 du1 = du(:,surface2.faces(:,1)); du2 = du(:,surface2.faces(:,2)); du3 = du(:,surface2.faces(:,3)); if debug assert(all(size(du1)==size(intMatrix)), 'Incorrect array dimensions: du1') end clear du intMatrix(du1.*du2>0 & du1.*du3>0) = 0; % same sign on all of them & not equal 0 if(all(intMatrix==0)), return; end % no intersections intMatrix(du1==0 & du2==0 & du3==0) = -1; % coplanar with unknown overlap %% compute plane of triangle (U0,U1,U2) % plane equation 2: N2.X-d2=0 U1 = surface2.vertices(surface2.faces(:,1),:); U2 = surface2.vertices(surface2.faces(:,2),:); U3 = surface2.vertices(surface2.faces(:,3),:); N2 = cross_prod(U2-U1,U3-U1); % array size nFace1 x 3 N2 = normalize(N2); d2 = dot_prod(N2,U1); % array size nFace1 x 1 %% Distance from surface #1 vertices to planes of surface #2 % Calculate signed distance from all vertices of surface #1 to each plane % of of surface #2 dv = zeros(nFace2,nVert1); for iVert1 = 1:nVert1 p = surface1.vertices(iVert1,:); dv(:,iVert1) = N2(:,1)*p(1) + N2(:,2)*p(2) + N2(:,3)*p(3) - d2; end if debug assert(all(size(dv)==[nFace2,nVert1]), 'Incorrect array dimensions: dv') end 
dv(abs(dv)<epsilon)=0; % robustness check % Distances from vertex 1, 2 & 3 of faces of surface #1 to planes of surface #2 dv1 = dv(:,surface1.faces(:,1))'; dv2 = dv(:,surface1.faces(:,2))'; dv3 = dv(:,surface1.faces(:,3))'; if debug assert(all(size(dv1)==size(intMatrix)), 'Incorrect array dimensions: dv1') end clear dv intMatrix(dv1.*dv2>0 & dv1.*dv3>0) = 0; % same sign on all of them & not equal 0 if(all(intMatrix==0)), return; end % no intersections intMatrix(dv1==0 & dv2==0 & dv3==0) = -1; % coplanar with unknown overlap % ======================================================================= %% === Stage 2 ========================================================== % ======================================================================= %% Process remaining (non-coplanar) triangle pairs tMsk = (intMatrix==-2); n = nnz(tMsk); if n>0 [face1, face2] = find(tMsk); switch lower(algorithm) case 'moller' if size(dv1(tMsk),1)==1 dv = [dv1(tMsk)', dv2(tMsk)', dv3(tMsk)']; du = [du1(tMsk)', du2(tMsk)', du3(tMsk)']; else dv = [dv1(tMsk), dv2(tMsk), dv3(tMsk)]; du = [du1(tMsk), du2(tMsk), du3(tMsk)]; end [intMatrix(tMsk), intSurface] = TriangleIntersection3D_Moller(... V1(face1,:), V2(face1,:), V3(face1,:), N1(face1,:), d1(face1,:), dv, ... U1(face2,:), U2(face2,:), U3(face2,:), N2(face2,:), d2(face2,:), du, ... getIntersection, debug); case 'rapid' % Undocumented experimental feature. In some experiments I got % identical results as with Moller algorithm, but others gave % different results. Often faster tham Moller. intMatrix(tMsk) = TriangleIntersection3D_Rapid( ... V1(face1,:), V2(face1,:), V3(face1,:), ... U1(face2,:), U2(face2,:), U3(face2,:), N1(face1,:), N2(face2,:) ); otherwise error('Unknown algorithm name'); end end % if %% Process coplanar triangle pairs. 
Pass #1: % compare the overlap of the bounding boxes tMsk = (intMatrix==-1); if nnz(tMsk)>0 [face1, face2] = find(tMsk); overlap = true; for idim = 1:3 v = [V1(face1,idim), V2(face1,idim), V3(face1,idim)]; u = [U1(face2,idim), U2(face2,idim), U3(face2,idim)]; t1 = min(v,[],2); t2 = max(v,[],2); s1 = min(u,[],2); s2 = max(u,[],2); overlap = overlap & (s1<=t2 & t1<=s2); end % if overlap intMatrix will remain "-1" otherwise it will change to "0" intMatrix(tMsk) = -1*overlap; clear v u t1 t2 s1 s2 overlap end %% Process coplanar triangle pairs. Pass #2: % use edge-edge intersections tMsk = (intMatrix==-1); if nnz(tMsk)>0 [face1, face2] = find(tMsk); % repack data prior to function call V(:,:,1)=V1(face1,:); V(:,:,2)=V2(face1,:); V(:,:,3)=V3(face1,:); U(:,:,1)=U1(face2,:); U(:,:,2)=U2(face2,:); U(:,:,3)=U3(face2,:); [intMatrix(tMsk), intSurface2] = TriangleIntersection2D(V, U, ... N1(face1,:), getIntersection, debug); %% Merge surfaces if getIntersection np = size(intSurface.vertices,1); intSurface.vertices = [intSurface.vertices; intSurface2.vertices]; intSurface.faces = [intSurface.faces; intSurface2.faces+np]; intSurface.edges = [intSurface.edges; intSurface2.edges+np]; if debug np = size(intSurface.vertices,1); assert(max(intSurface.faces(:))<=np, 'Bad surface definition') assert(max(intSurface.edges(:))<=np, 'Bad surface definition') end end end %% Clean up the outputs intMatrix = sparse(double(intMatrix)); if(getIntersection) % make point array unique P = round(intSurface.vertices*PointRoundingTol)/PointRoundingTol; [~,ia,ic] = unique(P,'rows'); % V = P(ia,:) and P = V(ic,:). intSurface.vertices = intSurface.vertices(ia,:); intSurface.faces = ic(intSurface.faces); intSurface.edges = ic(intSurface.edges); end end % function %% ======================================================================== function [iMsk, intSurface] = TriangleIntersection3D_Moller(... V1, V2, V3, N1, d1, dv, ... U1, U2, U3, N2, d2, du, ... 
getIntersection, debug) %TriangleIntersection3D tests if 2 triangles defined in 3D intersect. % This is a secondary test following Tomas Moller algorithm % % INPUTS: % V1, V2, V3, - Nx3 array of surface 1 triangle vertex coordinates % U1, U2, U3, - Nx3 array of surface 2 triangle vertex coordinates % N1, d1 - Nx3 array of surface 1 triangle plane equations N1.X-d1=0 % N2, d2 - Nx3 array of surface 2 triangle plane equations N2.X-d2=0 % dv - Nx3 array of distances of surface 1 triangle vertices to surface 2 planes % du - Nx3 array of distances of surface 2 triangle vertices to surface 1 planes % getIntersection - do we need to output the intersecting surface? % Algorithm is much simpler if we do not. % debug - In the debugging mode much more extra "sanity check" test % are performed. % % OUTPUT: % iMsk - N x 1 intersection boolean mask marking which triangles overlap % intSurface - intersection surface % % ALGORITHM: % The input triangles are guaranteed to intersect the line of intersection % of the two planes. Furthermore, these intersections form intervals on % this line, and the triangles overlap iff these intervals overlap as well. % Hence, the last part of the algorithm computes a parametric equation % L(t) of the line of intersection of the two planes, finds the intervals % (i.e. scalar intervals on L(t)) for which the line lies inside each % triangle and performs a one-dimensional interval overlap test. if debug ok = size(N1,2)==3 && size(N2,2)==3 && size(dv,2)==3 && size(du,2)==3 && ... size(V1,2)==3 && size(V2,2)==3 && size(V3,2)==3 && ... size(U1,2)==3 && size(U2,2)==3 && size(U3,2)==3; assert(ok, 'Incorrect array dimensions'); end %% create strip down versions of MATLAB cross and dot function cross_prod = @(a,b) [... a(:,2).*b(:,3)-a(:,3).*b(:,2), ... a(:,3).*b(:,1)-a(:,1).*b(:,3), ... 
a(:,1).*b(:,2)-a(:,2).*b(:,1)]; dot_prod = @(a,b) a(:,1).*b(:,1)+a(:,2).*b(:,2)+a(:,3).*b(:,3); normalize = @(V) bsxfun(@rdivide,V, sqrt(sum(V.^2,2))); %% Find intervals of surface 1 and 2 triangles % compute the scalar intervals on L(t) for which the line lies inside each % triangle % Plane creates two open half-spaces. Find the odd vertex, which: % 1) if no or two vertices are on the plane than pick the vertex which is % by itself in its half-space % 2) if one vertex is on the plane and the other two occupy the same % half-space than pick the vertex on the plane % 3) if one vertex is on the plane and the other two occupy different % half-spaces than pick one of the vertices off the plane % Find vertex using a look-up table "lut" with key calculated based on % sign of dv and du arrays lut = [0;3;3;2;1;3;2;2;1;1;2;3;3;0;3;3;2;1;1;2;2;3;1;2;3;3;0]; n = numel(d1); rows = (1:n)'; %% order surface 1 triangle vertices a1 = lut(sign(dv)*[9; 3; 1] + 14); % calculate the key and call the look-up table [b1, c1] = otherDim(a1); if debug assert(all(a1>0), 'Something Wrong: triangles are coplanar') end a1 = sub2ind([n,3],rows,a1); % convert row and column IDs to array indecies b1 = sub2ind([n,3],rows,b1); c1 = sub2ind([n,3],rows,c1); %% order surface 2 triangle vertices a2 = lut(sign(du)*[9; 3; 1] + 14); % calculate the key and call the look-up table [b2, c2] = otherDim(a2); if debug assert(all(a2>0), 'Something Wrong: triangles are coplanar') end a2 = sub2ind([n,3],rows,a2); b2 = sub2ind([n,3],rows,b2); c2 = sub2ind([n,3],rows,c2); %% compute direction of L the line of intersection of 2 planes % containing 2 triangles. 
Line L parametric equation: t*D+O=0 D = cross_prod(N1,N2); % D must be perpendicular to both N1 and N2 [~, maxDim] = max(abs(D),[],2); % compute and index to the largest component of D if(getIntersection) D = normalize(D); O = zeros(n,3); d = [d1, d2, zeros(n,1)]; for r =1:n N = [N1(r,:); N2(r,:); 0, 0, 0]; N(3,maxDim(r)) = 1; dd = d(r,:)'; O(r,:) = (N\dd)'; %Solve systems of linear equations N*D3 = d for D3 end clear N d dd end %% projection of triangle(V1,V2,V3) and triangle(U1,U2,U3) onto intersection line % Vp and Up are Nx3 arrays with columns indicating corners of triangles 1 and 2 if(getIntersection) Vp=[dot_prod(V1-O,D), dot_prod(V2-O,D), dot_prod(V3-O,D)]; Up=[dot_prod(U1-O,D), dot_prod(U2-O,D), dot_prod(U3-O,D)]; else % Project on one of the axis (closest to the intersection line) instead. % Simplified projection is faster and sufficient if we do not need % intersection line idx = sub2ind([n,3],rows,maxDim); Vp = [V1(idx), V2(idx), V3(idx)]; Up = [U1(idx), U2(idx), U3(idx)]; end clear V1 V2 V3 U1 U2 U3 %% Calculate surface 1 and 2 triangle intervals % t1 and t2 are intersection points of surface 1 with the intersection line % t*D+O=0, and s1 & s2 are intersection points of surface 2 with the same % line. Tomas Moller algorithm made this section much more complicated % trying to avoid divisions. However, I could not detect any speed-up. % Operations (ADD: 12; MUL:4 ; DIV:4 ) t1 = Vp(a1) - (Vp(b1)-Vp(a1)).*dv(a1)./(dv(b1)-dv(a1)); t2 = Vp(a1) - (Vp(c1)-Vp(a1)).*dv(a1)./(dv(c1)-dv(a1)); s1 = Up(a2) - (Up(b2)-Up(a2)).*du(a2)./(du(b2)-du(a2)); s2 = Up(a2) - (Up(c2)-Up(a2)).*du(a2)./(du(c2)-du(a2)); %% Order the intervals as to t1<t2 and s1<s2 msk = t2<t1; % order t1 and t2 so t1<t2 t = t1(msk); t1(msk)=t2(msk); t2(msk)=t; % swap msk = s2<s1; % order s1 and s2 so s1<s2 t = s1(msk); s1(msk)=s2(msk); s2(msk)=t; % swap %% Perform THE final test we were preparying for. 
% It test for the overlap of 2 1D intervals s1->s2 and t1->t2 iMsk = (s1<t2 & t1<s2); %% calculate intersection segments n = nnz(iMsk); if(getIntersection && n>0) % p1 = D*max(t1,s1) + O; p2 = D*min(t2,s2) + O p1 = bsxfun(@times,D(iMsk,:),max(t1(iMsk),s1(iMsk))) + O(iMsk,:); p2 = bsxfun(@times,D(iMsk,:),min(t2(iMsk),s2(iMsk))) + O(iMsk,:); intSurface.vertices = [p1; p2]; intSurface.faces = [1:n; n+1:2*n; n+1:2*n]'; intSurface.edges = intSurface.faces(:,1:2); else intSurface.vertices = []; intSurface.faces = []; intSurface.edges = []; end % if end % function %% ======================================================================== function [overlap, intSurface] = TriangleIntersection2D(V, U, N, ... getIntersection, debug) % Triangles V(V0,V1,V2) and U(U0,U1,U2) are are coplanar. Do they overlap? % INPUTS: % N - array(n,3) of surface normals where V(i,:,:) and U(i,:,:) are on the same plane % V - array(n,3,3) (nFace x 3 dimensions x 3 vertices) of surface #1 vertices % U - array(n,3,3) (nFace x 3 dimensions x 3 vertices) of surface #2 vertices % % OUTPUT: % iMsk - N x 1 intersection boolean mask marking which triangles overlap % intSurface - intersection surface % * parameters: vertices of triangle 1: V0,V1,V2 % * vertices of triangle 2: U0,U1,U2 % * result : returns 1 if the triangles intersect, otherwise 0 %% Constants needed for creating a mesh based on 3 to 6 points in a circle tri_mesh{6} = [1 2 6; 2 4 6; 2 3 4; 4 5 6]; tri_mesh{5} = [1 2 3; 1 3 4; 4 5 1]; tri_mesh{4} = [1 2 3; 1 3 4]; tri_mesh{3} = 1:3; vertices = []; faces = []; pairs = []; % each row corresponds to pair of faces. match row number with face number nVert = 0; %% use edge-edge intersections overlap = false(size(N,1),1); i1Idx = [1 1 1 2 2 2 3 3 3]; i2Idx = [3 3 3 1 1 1 2 2 2]; j1Idx = [1 2 3 1 2 3 1 2 3]; j2Idx = [3 1 2 3 1 2 3 1 2]; for row = 1:size(N,1) % When it is necesary to project 3D plane on 2D, dIdx will be the optimal % dimensions to use. 
[~, a] = max(abs(N(row,:))); [b, c] = otherDim(a); dIdx = [b, c]; order = []; %% test all edges of triangle 1 against the edges of triangle 2 % triangles overlap if edges cross [edgeMat, P] = EdgesIntersect3D(... squeeze(V(row,:,i1Idx))',squeeze(V(row,:,i2Idx))', ... squeeze(U(row,:,j1Idx))',squeeze(U(row,:,j2Idx))'); overlap(row) = any(edgeMat); if ~getIntersection && overlap(row), continue; end if ~overlap(row) %% project onto an axis-aligned plane, that maximizes the area % of the triangles, compute indices: dIdx which correspond to 2 smallest N1 % components. V2d = [V(row,dIdx,1); V(row,dIdx,2); V(row,dIdx,3)]; % each row is a 2D vertex U2d = [U(row,dIdx,1); U(row,dIdx,2); U(row,dIdx,3)]; %% test if tri1 is totally contained in tri2 or vice varsa if PointInTriangle2D(V2d(1,:), U2d) % tri1 is totally contained in tri2 overlap(row) = true; order = 1:3; elseif PointInTriangle2D(U2d(1,:), V2d) % tri2 is totally contained in tri1 overlap(row) = true; order = 4:6; end if overlap(row) && ~getIntersection, continue; end clear V2d U2d end %% Build the intersection surface if getIntersection && overlap(row) %Assemble all the points which might be needed for desining %intersection polygon: Intersection points and points from triangle 1 %and 2 points = [P(edgeMat,:); squeeze(V(row,:,:))'; squeeze(U(row,:,:))']; if isempty(order) % when one tri is totally contained in the other tri then order is set order = IntersectionPolygon(edgeMat>0, points, dIdx, debug); if isempty(order), continue; end end nPoint = length(order); % how many points will be added? nFace = nPoint-2; % how many faces will be added? vertices = [vertices; points(order,:)]; %#ok<*AGROW> faces = [faces; nVert+tri_mesh{nPoint} ]; pairs = [pairs; row+zeros(nFace,1)]; % each row corresponds to pair of faces. 
match row number with face number nVert = nVert + nPoint; if debug assert(max(faces(:))<=size(vertices,1), 'Bad surface definition') end end end % for %% Prepare outputs intSurface.vertices = vertices; intSurface.faces = faces; if isempty(faces) intSurface.edges = []; else intSurface.edges = [faces(:,1:2); faces(:,2:3); faces(:,[1,3])]; end end % function %% ======================================================================== function polygon = IntersectionPolygon(edgeMat, points, dIdx, debug) % edgeMat is an edge intersection matrix with 3 rows for edges between % the points 1-3, 1-2, & 2-3 of the triangle 1 and 3 columns for the same % edges of the triangle 2. If 2 edges intersect a point of intersection % is calculated and stored in array "points" followed by points of the % triangles 1 & 2. This function calculates the polygon of the intersection % between 2 triangles. persistent orderLUT verified if isempty(orderLUT) || isempty(orderLUT{3}) % This pre-calculated look-up table is used to quickly look up the order of % the vertices in array "points" which make up polygon of the intersection % between 2 triangles. A unique key is calculated for each edgeMat using % dot product between edgeMat(:) and [256 128 64 32 16 8 4 2 1], which is % used to look up point order around the polygon. Negative numbers in the % LUT indicate values which were not observed yet so they were not % independently verified. % reshape(sprintf('%09s',dec2base(key, 2)),3,3) will convert from the key % to matrix. 
OrderLUT = zeros(432,1); OrderLUT(003) = 127; OrderLUT(005) = 128; OrderLUT(006) = 126; OrderLUT(009) = 124; OrderLUT(010) = 1427; OrderLUT(012) = 1428; OrderLUT(017) = 1427; OrderLUT(018) = 124; OrderLUT(020) = 1426; OrderLUT(024) = 127; OrderLUT(027) = 1243; OrderLUT(029) = 12438; OrderLUT(030) = 12034; OrderLUT(033) = 1428; OrderLUT(034) = 1426; OrderLUT(036) = 124; OrderLUT(040) = 128; OrderLUT(043) = 21834; OrderLUT(045) = 1243; OrderLUT(046) = 21349; OrderLUT(048) = 126; OrderLUT(051) = 12340; OrderLUT(053) = 12943; OrderLUT(054) = 1243; OrderLUT(065) = 125; OrderLUT(066) = 1527; OrderLUT(068) = 1825; OrderLUT(072) = 123; OrderLUT(080) = 1327; OrderLUT(083) = 15234; OrderLUT(085) = -15234; OrderLUT(086) = -15243; OrderLUT(090) = 13247; OrderLUT(092) = -13247; OrderLUT(096) = 1328; OrderLUT(099) = 152834; OrderLUT(101) = 15234; OrderLUT(102) = 152349; OrderLUT(106) = 132847; OrderLUT(108) = 13247; OrderLUT(114) = 102347; OrderLUT(116) = -13247; OrderLUT(129) = 1527; OrderLUT(130) = 125; OrderLUT(132) = 1526; OrderLUT(136) = 1327; OrderLUT(139) = 15243; OrderLUT(141) = 152438; OrderLUT(142) = 152034; OrderLUT(144) = 123; OrderLUT(153) = 12347; OrderLUT(156) = 123047; OrderLUT(160) = 1326; OrderLUT(163) = -152043; OrderLUT(165) = 13247; OrderLUT(166) = 15234; OrderLUT(169) = -182347; OrderLUT(172) = 193247; OrderLUT(177) = -132047; OrderLUT(180) = 13247; OrderLUT(192) = 127; OrderLUT(195) = 1243; OrderLUT(197) = 12438; OrderLUT(198) = 12034; OrderLUT(202) = 12364; OrderLUT(204) = 123648; OrderLUT(209) = 21364; OrderLUT(212) = -21364; OrderLUT(216) = 1243; OrderLUT(225) = -124638; OrderLUT(226) = 120364; OrderLUT(232) = 12438; OrderLUT(238) = 124356; OrderLUT(240) = 12034; OrderLUT(245) = -214356; OrderLUT(257) = 1528; OrderLUT(258) = 1526; OrderLUT(260) = 125; OrderLUT(264) = 1328; OrderLUT(267) = -152438; OrderLUT(269) = 15243; OrderLUT(270) = -152943; OrderLUT(272) = 1326; OrderLUT(275) = 152340; OrderLUT(277) = 152943; OrderLUT(278) = 15243; OrderLUT(281) = 
182347; OrderLUT(282) = -103247; OrderLUT(288) = 123; OrderLUT(297) = 12347; OrderLUT(298) = -123947; OrderLUT(305) = 123947; OrderLUT(306) = 12347; OrderLUT(320) = 128; OrderLUT(323) = 21834; OrderLUT(325) = 1243; OrderLUT(326) = 21349; OrderLUT(330) = -123648; OrderLUT(332) = 12364; OrderLUT(337) = 183642; OrderLUT(340) = -129364; OrderLUT(344) = 21834; OrderLUT(350) = -124365; OrderLUT(353) = 12463; OrderLUT(354) = 136492; OrderLUT(360) = 1243; OrderLUT(368) = 12943; OrderLUT(371) = 126543; OrderLUT(384) = 126; OrderLUT(387) = 12340; OrderLUT(389) = 12943; OrderLUT(390) = 1243; OrderLUT(394) = -103642; OrderLUT(396) = 129364; OrderLUT(401) = 123640; OrderLUT(404) = 12364; OrderLUT(408) = 12340; OrderLUT(413) = 215643; OrderLUT(417) = -136492; OrderLUT(418) = 12463; OrderLUT(424) = 13492; OrderLUT(427) = -213456; OrderLUT(432) = 1342; % Convert to more convinient format orderLUT = cell(size(OrderLUT)); for i = 1:size(OrderLUT,1) polygon = abs(OrderLUT(i)); if polygon>0 polygon = num2str(polygon)-48; % Convert from a single number to array of digits polygon(polygon==0) = 10; % 0 stands for 10 orderLUT{i} = polygon; end end % Negative numbers in the LUT indicate values which were not observed yet % so they were not independently verified. verified = OrderLUT>0; clear OrderLUT end %% Calculate unique key for each edgeMat configuration key = dot(1*edgeMat(:)', [256 128 64 32 16 8 4 2 1]); assert(key<=432, 'Error: in IntersectionPolygon: key is out of bound'); %% Look up the point order around the polygon polygon = orderLUT{key}; if (isempty(polygon)) return end %% in a rare case of 2 intersections there is ambiguity if one or two % vertices of the triangle lay inside the other triangle. OrderLUT stores % only the single vertex cases. nx = nnz(edgeMat(:)); if nx==2 pList = polygon; % list of vertices to check pList(pList<=nx) = []; % keep only the triangle points of the polygon flip = false; % was there a flip from single vertex to vertices case? 
for ip = 1:length(pList) p = pList(ip); % point to check t = floor((p-nx-1)/3); % does it belong to triangle 0 or 1 (actually 1 or 2) tri = (1:3) + nx + 3*abs(1-t); % Points belonging to the other triangle if ~PointInTriangle2D(points(p,dIdx), points(tri,dIdx)) d = nx+t*3; % offset % "p-d" is vertex number of point just tested: 1, 2, or 3. "b, c" are % the other 2 vertices [b, c] = otherDim(p-d); polygon = [polygon(polygon~=p), b+d, c+d]; % remove i2 and add i0 and i1 flip = true; end end if flip % if ther were any flips than use existing codes to figure out the % order of the points around the polygon DT = delaunayTriangulation(points(polygon,dIdx)); idx = freeBoundary(DT)'; idx(2,:) = []; polygon = polygon(idx); end end %% Check to duplicate points tol = 1e6; P = round(points(polygon,:)*tol)/tol; [~,ia] = unique(P,'rows'); % V = P(ia,:) and P = V(ic,:). polygon = polygon(sort(ia)); %% Test the results using more expensive function doPlot = (~verified(key)); if debug && length(polygon)>3 DT = delaunayTriangulation(points(polygon,dIdx)); idx = freeBoundary(DT)'; idx(2,:) = []; k = max(abs(diff(idx))); %doPlot = (k>1 && k<(length(idx)-1)) || (~verified(key)); assert(k==1 || k==(length(idx)-1), 'Two triangle intersection polygon is not convex') end if debug && doPlot % plot the interesting cases PlotTwoTriangles(points, polygon, 'm') title(sprintf('key = %i', key)); end end % function %% ======================================================================== function PlotTwoTriangles(points, polygon, color) % Plotting function used for debugging nx = size(points,1)-6; d = (max(points,[],1)-min(points,[],1))/200; figure(2) clf hold on line( points(nx+(1:2),1), points(nx+(1:2),2), points(nx+(1:2),3), 'Color', 'g'); line( points(nx+(2:3),1), points(nx+(2:3),2), points(nx+(2:3),3), 'Color', 'g'); line( points(nx+[1,3],1), points(nx+[1,3],2), points(nx+[1,3],3), 'Color', 'g'); line( points(nx+(4:5),1), points(nx+(4:5),2), points(nx+(4:5),3), 'Color', 'b'); line( 
points(nx+(5:6),1), points(nx+(5:6),2), points(nx+(5:6),3), 'Color', 'b'); line( points(nx+[4,6],1), points(nx+[4,6],2), points(nx+[4,6],3), 'Color', 'b'); plot3( points(:,1), points(:,2), points(:,3), 'm.'); if (length(polygon)>2) idx = polygon([1:end, 1]); plot3( points(idx,1), points(idx,2),points(idx,3), 'Color', color, 'LineWidth', 1); end for i = 1:nx+6 text(points(i,1)+d(1), points(i,2)+d(2), points(i,3), num2str(i)) end end % function %% ======================================================================== function [intersect, X] = EdgesIntersect3D(V1,V2, U1,U2) %EdgesIntersectPoint3D calculates point of intersection of 2 coplanar % segments in 3D % % INPUTS: % V1,V2 - 1 x 3 coordinates of endpoints of edge 1 % U1,U2 - 1 x 3 coordinates of endpoints of edge 2 % OUTPUT: % X - 1 x 3 coordinates of the intersection point A = V2-V1; B = U1-U2; C = U1-V1; %% Solve system of equations [A,B,1] * [d;e;0] = C for d and e det3 = @(a,b) ... % determinant of a matrix with columns: [a, b, 1] a(:,1).*b(:,2)-a(:,3).*b(:,2) + ... a(:,2).*b(:,3)-a(:,2).*b(:,1) + ... a(:,3).*b(:,1)-a(:,1).*b(:,3); f=det3(A,B); % https://en.wikipedia.org/wiki/Cramer%27s_rule#Explicit_formulas_for_small_systems t=det3(C,B)./f; % use Cramer's rule s=det3(A,C)./f; intersect = (t>=0 & t<=1 & s>=0 & s<=1); X = V1 + bsxfun(@times,A,t); end % function %% ======================================================================== function inside = PointInTriangle2D(V1, U) % check if V1 is inside triangle U (U1,U2,U3) % Algorithm is checking on which side of the half-plane created by the % edges the point is. It uses sign of determinant to calculate orientation % of point triplets. 
% INPUTS: % V1 - 1 x 2 coordinates of a point % U - 3 x 2 coordinates of endpoints of 3 edges of a triangle % OUTPUT: % inside - a boolean or boolean array det2 = @(A,B,C) (A(:,1)-C(:,1))*(B(:,2)-C(:,2)) - (B(:,1)-C(:,1))*(A(:,2)-C(:,2)); b1 = (det2(U(1,:), U(2,:), V1) > 0); b2 = (det2(U(2,:), U(3,:), V1) > 0); b3 = (det2(U(3,:), U(1,:), V1) > 0); inside = ((b1 == b2) & (b2 == b3)); % inside if same orientation for all 3 edges end % function %% ======================================================================== function [b, c] = otherDim(a) % return set [1 2 3] without k b = mod(a+1,3)+1; % b and c are vertices which are on the same side of the plane c = 6-a-b; % a+b+c = 6 end %% ======================================================================== function overlap = TriangleIntersection3D_Rapid( v1, v2, v3, u1, u2, u3, n1, n2 ) %TriangleIntersection3D tests if 2 triangles defined in 3D intersect. % % INPUTS: % v1, v2, v3, - Nx3 array of surface 1 triangle vertex coordinates % u1, u2, u3, - Nx3 array of surface 2 triangle vertex coordinates % n1, n2 - Nx3 array of surface 1 & 2 triangle plane normals. Those % are optional and if provided than the first 2 steps of the algorithm % (which are equivalent to first 2 steps of Moller algorithm) will be % skipped. % % OUTPUT: % iMsk - N x 1 intersection boolean mask marking which triangles overlap % % ALGORITHM: % translated from the UNC-CH V-Collide RAPID code % https://wwwx.cs.unc.edu/~geom/papers/COLLISION/vcol.pdf global V1 V2 V3 U1 U2 U3 cross_prod = @(a,b) [... a(:,2).*b(:,3)-a(:,3).*b(:,2), ... a(:,3).*b(:,1)-a(:,1).*b(:,3), ... 
a(:,1).*b(:,2)-a(:,2).*b(:,1)]; %% shift t1 and t2 by p1# V1 = zeros(size(v1)); V2 = v2-v1; V3 = v3-v1; U1 = u1-v1; U2 = u2-v1; U3 = u3-v1; clear v1 v2 v3 u1 u2 u3 if(nargin<7) %% now begin the series of tests n1 = cross_prod( V2-V1, V3-V1 ); % face normals n2 = cross_prod( U2-U1, U3-U1 ); % face normals end %% test the face normals overlap = project6(n1) & project6(n2); V1 = V1(overlap,:); V2 = V2(overlap,:); V3 = V3(overlap,:); U1 = U1(overlap,:); U2 = U2(overlap,:); U3 = U3(overlap,:); n1 = n1(overlap,:); n2 = n2(overlap,:); %% compute triangle edges e1 = V2-V1; e2 = V3-V2; e3 = V1-V3; f1 = U2-U1; f2 = U3-U2; f3 = U1-U3; %% run more tests overlap2 = project6(cross_prod(e1, f1)); overlap2 = project6(cross_prod(e1, f2)) & overlap2; overlap2 = project6(cross_prod(e1, f3)) & overlap2; overlap2 = project6(cross_prod(e2, f1)) & overlap2; overlap2 = project6(cross_prod(e2, f2)) & overlap2; overlap2 = project6(cross_prod(e2, f3)) & overlap2; overlap2 = project6(cross_prod(e3, f1)) & overlap2; overlap2 = project6(cross_prod(e3, f2)) & overlap2; overlap2 = project6(cross_prod(e3, f3)) & overlap2; overlap2 = project6(cross_prod(e1, n1)) & overlap2; overlap2 = project6(cross_prod(e2, n1)) & overlap2; overlap2 = project6(cross_prod(e3, n1)) & overlap2; overlap2 = project6(cross_prod(f1, n2)) & overlap2; overlap2 = project6(cross_prod(f2, n2)) & overlap2; overlap2 = project6(cross_prod(f3, n2)) & overlap2; overlap(overlap) = overlap2; end %% ======================================================================== function pass = project6( p ) % project all 6 vertices of both triangles onto vector p and check if two % projections overlap global V1 V2 V3 U1 U2 U3 dot_prod = @(a,b) a(:,1).*b(:,1)+a(:,2).*b(:,2)+a(:,3).*b(:,3); %% Project vertices of triangle 1 and find the bounds min1 and max1 P = [dot_prod(p, V1), dot_prod(p, V2), dot_prod(p, V3)]; max1 = max(P,[],2); min1 = min(P,[],2); %% Project vertices of triangle 2 and find the bounds min1 and max1 P = [dot_prod(p, U1), 
dot_prod(p, U2), dot_prod(p, U3)]; max2 = max(P,[],2); min2 = min(P,[],2); %% Compare the bounds to see if they overlap pass = (( min1 < max2 ) & ( min2 < max1 )) | ~dot_prod(p, p); end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
task_final_praga.m
.m
3,669
128
close all ; clear all ; clc; %%author - Praga %%process time - 13 sec (without thresholding) %% - 1 minute (with thresolding) %%%%%%%% Task 1 %%%%%%%%%%%%%%%%%%%%%%% %%the conditions for R %%%%%%%%%%%%%%%%%% %% 1) tan(theta/2) > R/Yp %% 2) Yus*cos(theta/2) > 2R %% 3) Xus*cos(theta/2) > 2R %% 4) Yus - Yp > R %% 5) Xus > 2R %% %%%%%%% Task 2 %%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%defining all the variables%%%%%%%% FOV=120; %degree theta=(pi*2)/180; %radian R=2; %cm Yp=6; %cm Xus=10; %cm Yus=10; %cm Sx=0.1; %cm Sy=0.1; %cm num_pixel=Xus/Sx; %done only for X as both are equal %%to get the main image over here [im_main]=createmainimage(Xus,R,theta,num_pixel); %%%%%%%%% Task 3 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%creation and visualization of the empty voxel%%% VoxelMat=zeros(num_pixel,num_pixel,num_pixel); for i=1:num_pixel for j=1:num_pixel for k=1:num_pixel if (i-num_pixel/2)^2+(j-num_pixel/2)^2+(k-num_pixel/2)^2<(R/Sx)^2 VoxelMat(i,j,k)=255; end end end end %view of voxels [vol_handle,FV]=VoxelPlotter(VoxelMat,1); view(3); daspect([1,1,1]); set(gca,'xlim',[0 num_pixel], 'ylim',[0 num_pixel], 'zlim',[0 num_pixel]); title('actual sphere in Voxels without the reconstruction algorithem'); xlabel('X');ylabel('Y');zlabel('Z'); %push of taken image back into a 3D array as reconstruction %reconstruction view will be differnt as it makes coding easy %rotating the reconstructed matrix by 90 degrees will give the original %will be working with X and Z axis %%nearest neighbour voxel mapping array_recon=zeros(num_pixel,num_pixel,num_pixel); angle_1=-pi/3:theta:pi/3; %in radians %special case handled first ....... 
%array_recon(:,:,num_pixel/2)=im_main(:,:,31); %delete the angle zero from array %angle_1= angle_1([1:30, 32:end]); for k=1:size(angle_1,2) %rotation matrix with respect to y axis %Rot = [cos(angle(k)) 0 sin(angle(k));0 1 0;-sin(angle(k)) 0 cos(angle(k))]; x=size(im_main,1); y=size(im_main,2); for i =1:1:x for j=1:1:y new_cord=rotx(angle_1(k)*180/pi)*[i,j,0]'; new_cord(3)=new_cord(3)+50; new_cord=ceil(new_cord); if new_cord(3)>0 array_recon(new_cord(1),new_cord(2),new_cord(3))=im_main(i,j,k); array_recon=uint8(array_recon); end end end end %%print voxel after nearest neighbour map figure; [array_recon_1]=VoxelPlotter(array_recon,1); view(3); daspect([1,1,1]); set(gca,'xlim',[0 num_pixel], 'ylim',[0 num_pixel], 'zlim',[0 num_pixel]); title('Voxels without the interpolation'); xlabel('X');ylabel('Y');zlabel('Z'); %%%%%bilinear interpolation %%%%%%%%%%%% zoomed=zeros(size(array_recon)); %%%bilinear interpolation for j=1:size(array_recon,3) zoomed(:,:,j)=bilinear( array_recon(:,:,j) ); end figure; [zoomed_1]=VoxelPlotter(zoomed,1); view(3); daspect([1,1,1]); set(gca,'xlim',[0 num_pixel], 'ylim',[0 num_pixel], 'zlim',[0 num_pixel]); title('Voxels after bilinear interpolation'); xlabel('X');ylabel('Y');zlabel('Z'); %%%%%%%safe images after reconstruction %%%%%%%%% mkdir('final3Dimagesafterrecon'); cd('final3Dimagesafterrecon'); %save the image image_index=1; for i=1:100 filename = 'p%d.bmp'; filename = sprintf(filename,image_index); image_index = image_index + 1; imwrite(zoomed(:,:,i),filename); end
MATLAB
3D
pragasv/Modeling-a-3D-ultrasound-scan-and-reconstruction
Update_frontend/task_upgrade.m
.m
2,884
105
%task 2 ___ Update 1.0 %%%a front end script %%%%%%%%%%%%%%%%%% %% the contour can be visualized in this script in a more mathematically efficient manner %% the images will be saved in a directory after plot %% this can be loaded for the next step processing %% this script is designed for all knds of scenario which may arrise according to 3D image processing %% %%%%%%Task 2 a differennt approach <more mathematical>%%%%%%%%%%%%%%%%%%%%%%%% %========================================================================= clear all; close all; clc; %input the params from the user prompt = {'Enter Xus (cm):','Enter Yus (cm):','Enter sx (cm):'... 'Enter sy (cm):','Enter FOV (Degrees):'... 'Enter Angle per Frame (Degrees):','Enter R (cm):','Enter Yp (cm):'}; dlg_title = 'Input User Parameters'; num_lines = 1; def = {'10','10','0.1','0.1','120','2','2','6'}; answer = inputdlg(prompt,dlg_title,num_lines,def); Xus = str2double(answer(1));Yus = str2double(answer(2)); sx = str2double(answer(3));sy = str2double(answer(4)); FOV = str2double(answer(5));APF = str2double(answer(6)); R = str2double(answer(7));Yp = str2double(answer(8)); %define figures h = figure; image_index = 1; %make folder mkdir('Projection_Images'); cd('Projection_Images'); for angle = -FOV/2 : APF : FOV/2 %generate the parametric array t = linspace(0,2*pi,1000); %convert the angle to radians alpha = angle*pi/180; %calculate A A = 1 + (tan(alpha))^2; if (R^2 - Yp^2 + Yp^2/A < 0) continue; end %Parametric eqautions of the intersection contours x = sqrt(R^2 - Yp^2 + Yp^2/A)*cos(t); y = Yp/A + sqrt(R^2 - Yp^2 + Yp^2/A)/sqrt(A)*sin(t); z = y*tan(alpha); %Get the computed points in to a matrix points = [x;y;z]; %plot the intersection contours h = plot3(points(1,:),points(2,:),points(3,:));hold on; title('Intersection Contours');axis tight; %compute the rotational matrix Rx = rotx(-angle); %flatten the inclined contour points = Rx*points; %extract the X,Y coordinates from the flattened contour X = points(1,:); Y = 
points(2,:); %apply a shift to X coordinates X = X + Xus/2; %create an image pad image_pad = zeros(Xus/sx,Yus/sy); Xn = roundn(X,-1)*10; Yn = roundn(Y,-1)*10; %Sketch the boundary of the contour on the image pad for i = 1 : numel(Xn) image_pad(Xn(i),Yn(i)) = 255; end %convert the image pad matrix to uni8 im = mat2gray(image_pad); %fill the contour interior to complete the projection image im = imfill(im,'holes'); %save the image filename = 'p%d.bmp'; filename = sprintf(filename,image_index); image_index = image_index + 1; imwrite(im,filename); end %save the saveas(h,'Intersection_contours.fig');
MATLAB
3D
simonhmartin/dfs
sim_SFS_moments.py
.py
8,851
198
#!/usr/bin/env python # Simon H. Martin 2020 # simon.martin@ed.ac.uk # This script accompanies the paper: # "Signatures of introgression across the allelel frequency spectrum" # by Simon H. Martin and William Amos # It is a wrapper for the moments package (Jouganous et al. 2017, https://doi.org/10.1534/genetics.117.200493). # Simulation models are defined in periods. # Each period can have a defined length and size for each population # "--onePopPeriod" refers to periods before any population split. # If none of these are specified, it is assumed to have constant population size # "--twoPopPeriod" refers to periods after the first population split, and so on # for "--threePopPeriod" and "--fourPopPeriod". #Any number of periods can be specified. # Output is a frequency spectrum in tabular format. # The first N columns corrspond to base counts in the N populations # Final column represents the proportion of sites with that combination of base counts import numpy as np import moments import itertools, argparse, sys from collections import defaultdict def moments_model(params, ns): onePopPeriods,twoPopPeriods,threePopPeriods,fourPopPeriods = params # steady state for the equilibrium ancestral population sts = moments.LinearSystem_1D.steady_state_1D(sum(ns)) fs = moments.Spectrum(sts) # any changes in the ancestral population size are defined in the onePopPeriods for period in onePopPeriods: fs.integrate([period["N"]], period["T"]) if len(twoPopPeriods) >= 1: # Split the fs fs = moments.Manips.split_1D_to_2D(fs, ns[0], sum(ns[1:])) # any changes in the two resulting populations are defined in the twoPopPeriods # Second pop is ancestor of future pops 2, 3 and 4 for period in twoPopPeriods: fs.integrate([period["N1"],period["N2"]], period["T"], m=np.array([[0, period["m12"]], [period["m21"],0]])) if len(threePopPeriods) >= 1: # Second Split (split ancestor of 2 and 3) fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], sum(ns[2:])) # periods after second split have three 
pops and many migration rates for period in threePopPeriods: fs.integrate([period["N1"], period["N2"], period["N3"]], period["T"], m = np.array([[0, period["m12"], period["m13"]], [period["m21"],0, period["m23"]], [period["m31"],period["m32"],0 ]])) if len(fourPopPeriods) >= 1: # Second Split (split ancestor of 2 and 3) fs = moments.Manips.split_3D_to_4D_3(fs, ns[2], ns[3]) # periods after second split have three pops and many migration rates for period in fourPopPeriods: fs.integrate([period["N1"], period["N2"], period["N3"], period["N4"]], period["T"], m = np.array([[ 0, period["m12"],period["m13"],period["m14"]], [period["m21"], 0, period["m23"],period["m24"]], [period["m31"],period["m32"], 0 ,period["m34"]], [period["m41"],period["m42"],period["m43"], 0 ]])) return fs def sfsArrayToTable(sfs_array, excludeZero=True): indices = itertools.product(*[range(i) for i in sfs_array.shape]) return [list(idx) + [sfs_array[idx]] for idx in indices if not (sfs_array.mask[idx] or (excludeZero and sfs_array[idx]==0))] #a function to add the necessary parser arguments. 
This is so that you can import this function in other scripts and it'll automatically add the required arguments def addSimArgsToParser(parser): parser.add_argument("--Nsam", help="Number of samples in each population", type=int, action = "store", nargs="+") parser.add_argument("--onePopPeriod", help="Params for one pop period in format 'T=1 N=0.5'", nargs="+", action = "append") parser.add_argument("--twoPopPeriod", help="Params for two pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action = "append") parser.add_argument("--threePopPeriod", help="Params for three pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action = "append") parser.add_argument("--fourPopPeriod", help="Params for four pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action = "append") def getParamDict(args): paramDict={'Nsam': args.Nsam} paramDict["onePopPeriods"] = [] paramDict["twoPopPeriods"] = [] paramDict["threePopPeriods"] = [] paramDict["fourPopPeriods"] = [] if args.onePopPeriod is not None: for period in args.onePopPeriod: paramDict["onePopPeriods"].append(defaultdict(int)) paramDict["onePopPeriods"][-1].update(dict([(x,float(y)) for x,y in [item.split("=") for item in period]])) #check N values if paramDict["onePopPeriods"][-1]["N"] == 0: paramDict["onePopPeriods"][-1]["N"] = 1 if args.twoPopPeriod is not None: for period in args.twoPopPeriod: paramDict["twoPopPeriods"].append(defaultdict(int)) paramDict["twoPopPeriods"][-1].update(dict([(x,float(y)) for x,y in [item.split("=") for item in period]])) #check N values for N in "N1","N2": if paramDict["twoPopPeriods"][-1][N] == 0: paramDict["twoPopPeriods"][-1][N] = 1 if args.threePopPeriod is not None: for period in args.threePopPeriod: paramDict["threePopPeriods"].append(defaultdict(int)) paramDict["threePopPeriods"][-1].update(dict([(x,float(y)) for x,y in [item.split("=") for item in period]])) #check N values for N in "N1","N2","N3": if paramDict["threePopPeriods"][-1][N] == 0: 
paramDict["threePopPeriods"][-1][N] = 1 if args.fourPopPeriod is not None: for period in args.fourPopPeriod: paramDict["fourPopPeriods"].append(defaultdict(int)) paramDict["fourPopPeriods"][-1].update(dict([(x,float(y)) for x,y in [item.split("=") for item in period]])) #check N values for N in "N1","N2","N3","N4": if paramDict["fourPopPeriods"][-1][N] == 0: paramDict["fourPopPeriods"][-1][N] = 1 return paramDict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--commandLinesFile", help="File of multiple command lines", action = "store") addSimArgsToParser(parser) #args = parser.parse_args("--Nsam 4 4 20 20 --twoPopPeriod T=1 --threePopPeriod T=1 --fourPopPeriod T=1".split()) args = parser.parse_args() if args.commandLinesFile: with open(args.commandLinesFile, "rt") as commandLinesFile: argsList = [parser.parse_args(l.split()) for l in commandLinesFile] else: argsList = [args] for _args_ in argsList: paramDict = getParamDict(_args_) #print(paramDict, file=sys.stderr) sys.stderr.write("\n{} periods in first phase\n".format(len(paramDict["onePopPeriods"]))) for period in paramDict["onePopPeriods"]: sys.stderr.write(" Length: {} x2N generations\n".format(period["T"])) #print(period,file=sys.stderr) sys.stderr.write("\n{} periods in second phase\n".format(len(paramDict["twoPopPeriods"]))) for period in paramDict["twoPopPeriods"]: sys.stderr.write(" Length: {} x2N generations\n".format(period["T"])) #print(period,file=sys.stderr) sys.stderr.write("\n{} periods in third phase\n".format(len(paramDict["threePopPeriods"]))) for period in paramDict["threePopPeriods"]: sys.stderr.write(" Length: {} x2N generations\n".format(period["T"])) #print(period,file=sys.stderr) sys.stderr.write("\n{} periods in fourth phase\n".format(len(paramDict["fourPopPeriods"]))) for period in paramDict["fourPopPeriods"]: sys.stderr.write(" Length: {} x2N generations\n".format(period["T"])) #print(period,file=sys.stderr) ### sfs = 
moments_model((paramDict["onePopPeriods"], paramDict["twoPopPeriods"], paramDict["threePopPeriods"], paramDict["fourPopPeriods"]), paramDict["Nsam"]) sfsTable = sfsArrayToTable(sfs.round(6)) sys.stdout.write("\n".join(["\t".join([str(val) for val in lst]) for lst in sfsTable]) + "\n")
Python
3D
simonhmartin/dfs
DFS.R
.R
13,879
308
# Simon H. Martin 2020 # simon.martin@ed.ac.uk # This script accompanies the paper: # "Signatures of introgression across the allelel frequency spectrum" # by Simon H. Martin and William Amos # It contains functions for computing and plotting the D frequency spectrum # and related statistics from an input site frequency spectrum (SFS). # to accomodate large, sparse, frequency spectra, we represent the SFS in two parts. # First is the BASE_COUNTS: a matrix with the three columns corresponding to populations P1, P2 and P3. # An optional fourth column for the outgroup can also be provided, if the SFS is unpolarised # Second is the SITE_COUNTS: a vector with length corresponding to the number of rows # in the base_counts matrix, giving the number of sites corresponding to each combination of counts # The SFS may be sparse: not all combinations of counts need to be provided if there are zero sites # with a given combination. # Most functions also require the haploid sample sizes (Ns) of the three (or four) populations as input. # If these are not provided, they are inferred from the highest value in the base_counts columns. # This is not recommended for an empirical SFS, because high-frequency combinations are typically rare and may be absent. ################################################################################ #function to compute the D frequency spectrum get.DFS <- function(base_counts, site_counts=NULL, Ns=NULL){ #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max) #site counts are used to compress the input data. 
They give the number of sites observed with those base counts #if not specified, assume all patterns are represented once if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts)) if (!(((ncol(base_counts) == 4 & length(Ns) ==4) | (ncol(base_counts) == 3 & length(Ns) == 3)) & Ns[1] == Ns[2])){ print("Incorrect specification") return() } #convert base_counts to frequencies freqs = base_counts / t(replicate(nrow(base_counts), Ns)) N = Ns[1] #identify sites where P1 and P2 each have a specified allele frequency idx_1 = lapply(1:N, function(i) which(base_counts[,1] == i)) idx_2 = lapply(1:N, function(i) which(base_counts[,2] == i)) #get total counts of each pattern pattern_sums_by_count_1 <- sapply(1:N, function(i) apply(get.patterns(freqs[idx_1[[i]],1], freqs[idx_1[[i]],2], freqs[idx_1[[i]],3], freqs[idx_1[[i]],4]) * site_counts[idx_1[[i]]],2,sum)) pattern_sums_by_count_2 <- sapply(1:N, function(i) apply(get.patterns(freqs[idx_2[[i]],1], freqs[idx_2[[i]],2], freqs[idx_2[[i]],3], freqs[idx_2[[i]],4]) * site_counts[idx_2[[i]]],2,sum)) ABBA_by_count <- pattern_sums_by_count_2["ABBA",] BABA_by_count <- pattern_sums_by_count_1["BABA",] DFS <- (ABBA_by_count - BABA_by_count) / (ABBA_by_count + BABA_by_count) weights <- (ABBA_by_count + BABA_by_count)/ sum((ABBA_by_count + BABA_by_count)) data.frame(DFS=DFS, weights=weights, ABBA=ABBA_by_count, BABA=BABA_by_count) } #function to compute the overall D statistic get.D.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL, full=FALSE){ #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max) #site counts are used to compress the input data. 
# (continuation of get.D.from.base.counts, whose signature and Ns handling begin above)
    # They give the number of sites observed with those base counts.
    # If not specified, assume all patterns are represented once.
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))
    if (!((ncol(base_counts) == 4 & length(Ns) == 4) | (ncol(base_counts) == 3 & length(Ns) == 3))){
        print("Incorrect specification")
        return()
        }
    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))
    idx <- 1:nrow(freqs)
    #get total counts of each pattern
    pattern_sums <- apply(get.patterns(freqs[idx,1], freqs[idx,2], freqs[idx,3], freqs[idx,4]) * site_counts, 2, sum)
    if (full == FALSE){
        ABBA <- pattern_sums["ABBA"]
        BABA <- pattern_sums["BABA"]
        return(as.numeric((ABBA - BABA) / (ABBA + BABA)))
    } else {
        ABBA_BAAB <- pattern_sums["ABBA_BAAB"]
        BABA_ABAB <- pattern_sums["BABA_ABAB"]
        return(as.numeric((ABBA_BAAB - BABA_ABAB) / (ABBA_BAAB + BABA_ABAB)))
    }
    }

# Function to compute the overall f statistic (Green et al. 2010).
# Assumes that P3 is the donor population and P2 the only recipient.
# If this is false, the value will be misleading.
get.f.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL, full=FALSE){
    # If the number of haplotypes per population is not specified,
    # assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) {
        print("WARNING: Ns not provided, assuming maximum count observed per population is N haploid samples")
        Ns <- apply(base_counts, 2, max)
        }
    # Site counts are used to compress the input data. They give the number of
    # sites observed with those base counts. If not specified, assume all
    # patterns are represented once.
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))
    if (!((ncol(base_counts) == 4 & length(Ns) == 4) | (ncol(base_counts) == 3 & length(Ns) == 3))){
        print("Incorrect specification")
        return()
        }
    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))
    idx <- 1:nrow(freqs)
    #get total counts of each pattern
    pattern_sums <- apply(get.patterns(freqs[idx,1], freqs[idx,2], freqs[idx,3], freqs[idx,4]) * site_counts, 2, sum)
    if (full == FALSE){
        ABBA <- pattern_sums["ABBA"]
        BABA <- pattern_sums["BABA"]
        ABBA_f <- pattern_sums["ABBA_f"]
        BABA_f <- pattern_sums["BABA_f"]
        return(as.numeric((ABBA - BABA) / (ABBA_f - BABA_f)))
    } else {
        ABBA_BAAB <- pattern_sums["ABBA_BAAB"]
        BABA_ABAB <- pattern_sums["BABA_ABAB"]
        ABBA_BAAB_f <- pattern_sums["ABBA_BAAB_f"]
        BABA_ABAB_f <- pattern_sums["BABA_ABAB_f"]
        return(as.numeric((ABBA_BAAB - BABA_ABAB) / (ABBA_BAAB_f - BABA_ABAB_f)))
    }
    }

# Function to compute the f4 statistic (Patterson et al. 2012).
get.f4.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL) {
    # If Ns is not specified, assume the maximum observed count - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))
    f4_by_site <- (freqs[,1] - freqs[,2]) * (freqs[,3] - freqs[,4])
    if (is.null(site_counts) == TRUE) {
        return(mean(f4_by_site))
    } else {
        weighted.mean(f4_by_site, site_counts)
    }
    }

# Function to compute the doubly-conditioned frequency spectrum (Yang et al. 2012).
get.dcfs <- function(base_counts, site_counts=NULL, Ns=NULL){
    # If Ns is not specified, assume the maximum observed count - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)
    # Site counts compress the input: number of sites per base-count pattern.
    # If not specified, assume each pattern is represented once.
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))
    #assert that base counts are for three populations (they must be polarized)
    if (ncol(base_counts) != 3 | length(Ns) != 3){
        print("Incorrect specification")
        return()
        }
    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))
    N = Ns[2]
    #identify sites where P2 has each possible derived allele count
    indices = lapply(1:N, function(i) which(base_counts[,2] == i))
    # For each frequency class in P2, weight by the probability of drawing an
    # ancestral allele in P1 and a derived allele in P3, and sum site counts.
    dcfs_unscaled <- sapply(indices, function(idx) sum((1 - freqs[idx,1]) * freqs[idx,3] * site_counts[idx]))
    dcfs_unscaled / sum(dcfs_unscaled)
    }

# An experimental function to compute a 2-dimensional D frequency spectrum.
get.DFS2D <- function(base_counts, site_counts=NULL, Ns=NULL){
    # If Ns is not specified, assume the maximum observed count - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)
    # Site counts compress the input: number of sites per base-count pattern.
    # If not specified, assume each pattern is represented once.
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))
    if (!(((ncol(base_counts) == 4 & length(Ns) == 4) | (ncol(base_counts) == 3 & length(Ns) == 3)) & Ns[1] == Ns[2])){
        print("Incorrect specification")
        return()
        }
    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))
    #identify sites where P1 and P2 each have a specified allele frequency
    pattern_sums_array <- array(dim=c(Ns[1], Ns[2], 2),
                                dimnames = list(1:Ns[1], 1:Ns[2], c("ABBA", "BABA")))
    for (i in 1:Ns[1]){
        for (j in 1:Ns[2]){
            idx = which(base_counts[,1] == i & base_counts[,2] == j)
            pattern_sums_array[i,j,] <- apply(get.patterns(freqs[idx,1], freqs[idx,2], freqs[idx,3], freqs[idx,4])[,c("ABBA","BABA")] * site_counts[idx], 2, sum)
            }
        }
    DFS <- (pattern_sums_array[,,"ABBA"] - pattern_sums_array[,,"BABA"]) / (pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"])
    weights <- (pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"]) / sum((pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"]))
    list(DFS2D=DFS, weights=weights)
    }

# Function to polarize counts for an unpolarized SFS.
polarize.counts <- function(counts, Ns, OGcolumn=NULL, outgroup_pol_to_NA=TRUE){
    #If the outgroup column is not specified, assume it is the last one
    OG <- ifelse(is.null(OGcolumn) == TRUE, ncol(counts), OGcolumn)
    counts_plr <- counts
    # Sites polymorphic in the outgroup are ambiguous - optionally set to NA
    if (outgroup_pol_to_NA == TRUE){
        counts_plr[counts_plr[,OG] != 0 & counts_plr[,OG] != Ns[OG],] <- NA
        }
    # Flip counts at sites where the outgroup carries the majority allele
    flip_idx = which(counts_plr[,OG] > Ns[OG]/2)
    counts_plr[flip_idx,] <- t(apply(counts[flip_idx,], 1, function(x) Ns - x))
    counts_plr
    }

# Function to compute ABBA and BABA (and related) site-pattern weights from
# allele frequencies for up to four populations.
get.patterns <- function(p1, p2, p3, pO=NULL){
    #assume that if there's no outgroup, it is always fixed ancestral
    if (is.null(pO) == TRUE | length(pO) == 0) pO <- 0
    data.frame(ABBA = (1 - p1) * p2 * p3 * (1-pO),
               BABA = p1 * (1 - p2) * p3 * (1-pO),
               ABBA_BAAB = (1 - p1) * p2 * p3 * (1-pO) + p1 * (1-p2) * (1-p3) * pO,
               BABA_ABAB = p1 * (1 - p2) * p3 * (1-pO) + (1-p1) * p2 * (1-p3) * pO,
               ABBA_f = (1 - p1) * p3 * p3 * (1-pO),
               BABA_f = p1 * (1-p3) * p3 * (1-pO),
               ABBA_BAAB_f = (1 - p1) * p3 * p3 * (1-pO) + p1 * (1-p3)**2 * pO,
               BABA_ABAB_f = p1 * (1 - p3) * p3 * (1-pO) + (1-p1) * p3 * (1-p3) * pO
               )
    }

# Most functions here use a table format for the SFS, which suits a large,
# sparse array. This converts to the more conventional NxN(xN...) array.
sfs.table.to.array <- function(sfs_table, dims=NULL, count_col=NULL){
    if (is.null(dims) == TRUE) dims <- apply(sfs_table[,-ncol(sfs_table)], 2, max) + 1
    ndim <- length(dims)
    if (is.null(count_col) == TRUE) count_col <- ndim + 1
    arr <- array(dim=dims)
    # Counts are 0-based indices into the SFS, hence the +1 for R indexing
    for (i in 1:nrow(sfs_table)){
        arr[as.matrix(sfs_table[i,-count_col]) + 1] <- sfs_table[i,count_col]
        }
    arr
    }

################################################ plotting functions

plotDFS <- function(DFS, weights, method="lines", ylim=c(-1,1), show_D=TRUE,
                    col="black", col_D="black", width_scale=100, no_xlab=FALSE, add=FALSE){
    if (method == "lines"){
        N = length(DFS)
        if (add == FALSE){
            plot(0, xlim = c(1,N), ylim = ylim, cex=0, xlab = "", ylab = "", xaxt="n", bty="n")
            abline(h=0)
            }
        # Line width reflects the weight of each frequency class
        segments(1:N, 0, 1:N, DFS, lwd = width_scale*weights, lend=1, col=col)
        }
    if (method == "bars") barplot(DFS, col= rgb(0,0,0,weights), ylim = ylim, add=add)
    if (method == "scaled_bars") barplot(DFS*weights, ylim = ylim, add=add)
    if (no_xlab == FALSE & add == FALSE) mtext(1, text="Derived allele frequency", line = 0)
    if (add == FALSE) mtext(2, text=expression(italic("D")), line = 2.8, las=2)
    # Dashed line at the overall (weighted) D statistic
    if (show_D == TRUE) abline(h= sum(DFS * weights), lty = 2, col=col_D)
    }

plot.dcfs <- function(dcfs){
    plot(dcfs, type="b")
    }
R
3D
simonhmartin/dfs
sim_DFS_moments.R
.R
28,743
580
# Simon H. Martin 2020
# simon.martin@ed.ac.uk

################################ Start Here ####################################

# This script accompanies the paper:
# "Signatures of introgression across the allele frequency spectrum"
# by Simon H. Martin and William Amos

# It provides code for setting up and simulating site frequency spectra
# under various models, and then plotting the D frequency spectrum (DFS).
# Simulations are performed using moments (Jouganous et al. 2017).

#import necessary libraries
library(parallel)
library(data.table)

#import code for computing and plotting DFS and related statistics
source("DFS.R")

#change limit for scientific notation (this avoids exporting numbers in a strange format when making commands)
options(scipen = 10)

################################################################################
############# Functions for making the simulation input commands ###############
################################################################################

### NOTE by default in moments (as in dadi), first split produces P2 from P1. Second produces P3 from P2.
### Thus, for classic ABBA BABA, P1 and P3 are reversed (e.g. P1 is Neanderthal and P3 is Africa)

# Return y when x is NULL, otherwise x (used to supply command defaults)
if.null <- function(x, y){
    if (is.null(x) == TRUE) return(y)
    return(x)
    }

#make moments simulation command for three population models
make_3pop_sim_command <- function(params){
    paste("python", "sim_SFS_moments.py",
          "--Nsam", paste(params$n1, params$n2, params$n3),
          "--twoPopPeriod",
          paste0("T=", params$p2_T,   #input is in units of 2N gen
                 " N1=", if.null(params$p2_N1, 1),
                 " N2=", if.null(params$p2_N2, 1),
                 " m12=", if.null(params$p2_m12, 0),
                 " m21=", if.null(params$p2_m21, 0)),
          "--threePopPeriod",
          paste0("T=", params$p31_T,
                 " N1=", if.null(params$p31_N1, 1),
                 " N2=", if.null(params$p31_N2, 1),
                 " N3=", if.null(params$p31_N3, 1),
                 " m12=", if.null(params$p31_m12, 0),
                 " m21=", if.null(params$p31_m21, 0),
                 " m23=", if.null(params$p31_m23, 0),
                 " m32=", if.null(params$p31_m32, 0),
                 " m13=", if.null(params$p31_m13, 0),
                 " m31=", if.null(params$p31_m31, 0)),
          "--threePopPeriod",
          paste0("T=", params$p32_T,
                 " N1=", if.null(params$p32_N1, 1),
                 " N2=", if.null(params$p32_N2, 1),
                 " N3=", if.null(params$p32_N3, 1),
                 " m12=", if.null(params$p32_m12, 0),
                 " m21=", if.null(params$p32_m21, 0),
                 " m23=", if.null(params$p32_m23, 0),
                 " m32=", if.null(params$p32_m32, 0),
                 " m13=", if.null(params$p32_m13, 0),
                 " m31=", if.null(params$p32_m31, 0)),
          " 2> /dev/null")
    }

#make moments simulation command for four population models
make_4pop_sim_command <- function(params){
    paste("python", "sim_SFS_moments.py",
          "--Nsam", paste(params$n1, params$n2, params$n3, params$n4),
          "--twoPopPeriod",
          paste0("T=", params$p2_T,   #input is in units of 2N gen
                 " N1=", if.null(params$p2_N1, 1),
                 " N2=", if.null(params$p2_N2, 1),
                 " m12=", if.null(params$p2_m12, 0),
                 " m21=", if.null(params$p2_m21, 0)),
          "--threePopPeriod",
          paste0("T=", params$p3_T,
                 " N1=", if.null(params$p3_N1, 1),
                 " N2=", if.null(params$p3_N2, 1),
                 " N3=", if.null(params$p3_N3, 1),
                 " m12=", if.null(params$p3_m12, 0),
                 " m21=", if.null(params$p3_m21, 0),
                 " m13=", if.null(params$p3_m13, 0),
                 " m31=", if.null(params$p3_m31, 0),
                 " m23=", if.null(params$p3_m23, 0),
                 " m32=", if.null(params$p3_m32, 0)),
          "--fourPopPeriod",
          paste0("T=", params$p41_T,
                 " N1=", if.null(params$p41_N1, 1),
                 " N2=", if.null(params$p41_N2, 1),
                 " N3=", if.null(params$p41_N3, 1),
                 " N4=", if.null(params$p41_N4, 1),
                 " m12=", if.null(params$p41_m12, 0),
                 " m21=", if.null(params$p41_m21, 0),
                 " m13=", if.null(params$p41_m13, 0),
                 " m31=", if.null(params$p41_m31, 0),
                 " m14=", if.null(params$p41_m14, 0),
                 " m41=", if.null(params$p41_m41, 0),
                 " m23=", if.null(params$p41_m23, 0),
                 " m32=", if.null(params$p41_m32, 0),
                 " m24=", if.null(params$p41_m24, 0),
                 " m42=", if.null(params$p41_m42, 0),
                 " m34=", if.null(params$p41_m34, 0),
                 " m43=", if.null(params$p41_m43, 0)),
          "--fourPopPeriod",
          paste0("T=", params$p42_T,
                 " N1=", if.null(params$p42_N1, 1),
                 " N2=", if.null(params$p42_N2, 1),
                 " N3=", if.null(params$p42_N3, 1),
                 " N4=", if.null(params$p42_N4, 1),
                 " m12=", if.null(params$p42_m12, 0),
                 " m21=", if.null(params$p42_m21, 0),
                 " m13=", if.null(params$p42_m13, 0),
                 " m31=", if.null(params$p42_m31, 0),
                 " m14=", if.null(params$p42_m14, 0),
                 " m41=", if.null(params$p42_m41, 0),
                 " m23=", if.null(params$p42_m23, 0),
                 " m32=", if.null(params$p42_m32, 0),
                 " m24=", if.null(params$p42_m24, 0),
                 " m42=", if.null(params$p42_m42, 0),
                 " m34=", if.null(params$p42_m34, 0),
                 " m43=", if.null(params$p42_m43, 0)),
          " 2> /dev/null")
    }

# Function to convert parameter names to those used by the moments script.
# This is needed because, like dadi, moments numbers the populations
# differently from the typical ABBA BABA numbering (pop 1 <-> pop 3).
make_moments_params_3pop <- function(params){
    list(n1 = params$n3, n2 = params$n2, n3 = params$n1, #number of samples in each pop
         p2_T = params$p2_T,           #length of two pop period (in 2N generations)
         p2_m21 = params$p2_m12_3,     #migration from P1+P2 ancestor into P3
         p2_m12 = params$p2_m3_12,     #migration from P3 into P1+P2 ancestor
         p2_N1 = params$p2_N3,         #two pop period: P3 size
         p2_N2 = params$p2_N12,        #two pop period: P1+P2 ancestor size
         p31_T = params$p31_T,         #first three pop period
         p31_N1 = params$p31_N3,
         p31_N2 = params$p31_N2,
         p31_N3 = params$p31_N1,
         p31_m21 = params$p31_m23,
         p31_m12 = params$p31_m32,
         p31_m31 = params$p31_m13,
         p31_m13 = params$p31_m31,
         p31_m23 = params$p31_m21,
         p31_m32 = params$p31_m12,
         p32_T = params$p32_T,         #second three pop period
         p32_N1 = params$p32_N3,
         p32_N2 = params$p32_N2,
         p32_N3 = params$p32_N1,
         p32_m21 = params$p32_m23,
         p32_m12 = params$p32_m32,
         p32_m31 = params$p32_m13,
         p32_m13 = params$p32_m31,
         p32_m23 = params$p32_m21,
         p32_m32 = params$p32_m12
         )
    }

##################################################################################
##################################################################################
####################  RUN SINGLE SIMULATION AND PLOT  ############################
##################################################################################
##################################################################################

#below is code for
# running a single simulation and plotting the result directly

##################################################################################
######################### standard 3 pop model #################################
##################################################################################

# NOTE in moments, as in dadi, the first split produces pop 2 from pop 1, and
# the second produces pop 3 from pop 2. So the traditional ABBA BABA P3
# (e.g. Neanderthal) is pop 1 in moments, and P1 is pop 3, and introgression is
# simulated between pops 1 and 2. This was dealt with above using the
# make_moments_params_3pop function, but here we just remember to switch the
# numbers ourselves.

#Three population model
params <- list(n1 = 4, n2=20, n3=20,   #number of samples in each pop
               p2_T = 0.2,             #length of two pop period (in 2N generations)
               p2_m21 = 0, p2_m12 = 0, #migration during two pop period
               p31_T = 0.1,            #first three pop period
               p31_N1 = 1, p31_N2 = 1, p31_N3 = 1,
               p31_m21 = 0, p31_m12 = 0, p31_m31 = 0, p31_m13 = 0,
               p32_T = 0.1,            #second three pop period
               p32_N1 = 1, p32_N2 = 1, p32_N3 = 1,
               p32_m21 = 1,            #introgression during second period
               p32_m12 = 0, p32_m31 = 0, p32_m13 = 0, p32_m23 = 0, p32_m32 = 0)

#population indices (remember: in moments the first split produced P2 from P1 and the second split produced P3 from P2)
P1 <- 3
P2 <- 2
P3 <- 1

#make command
command <- make_3pop_sim_command(params)
print(command)

#run simulation
SFS <- read.table(text=system(command, intern=TRUE, ignore.stderr=FALSE))

#get DFS (params$n4 is NULL here, so the Ns vector simply has three entries)
dfs_data <- get.DFS(base_counts=SFS[,c(P1,P2,P3)],
                    site_counts=SFS[,ncol(SFS)],
                    Ns=c(params$n1,params$n2,params$n3,params$n4)[c(P1,P2,P3)])

#plot
plotDFS(dfs_data$DFS, dfs_data$weights, col_D="red")

dfs_data

##################################################################################
####################### ancestral structure model ##############################
##################################################################################

# NOTE in moments the first split produces pop 2 from pop 1, and the second
# produces pop 3 from pop 2. For the ancestral structure model this is what we
# want, because the first split introduces structure (e.g. in Africa) and the
# second splits a population off (e.g. Neanderthal) - see Yang et al. 2012.
params <- list(n1 = 20, n2=20, n3=4,     #number of samples in each pop
               p2_T = 1,                 #length of ancestral structure period (in N generations)
               p2_m12 = 2, p2_m21 = 2,   #migration during ancestral structure period
               p31_T = 0.1,              #length of continued structure after first split
               p31_m12 = 2, p31_m21 = 2, #continued migration
               p32_T = 0.1)              #split between ingroups

#population indices: for this model the pops are already correct, because the first split produces e.g. Neanderthal, which is P1 here
P1 <- 1
P2 <- 2
P3 <- 3

#make command
command <- make_3pop_sim_command(params)
print(command)

#run simulation
SFS <- read.table(text=system(command, intern=TRUE, ignore.stderr=FALSE))

#get DFS
dfs_data <- get.DFS(base_counts=SFS[,c(P1,P2,P3)],
                    site_counts=SFS[,ncol(SFS)],
                    Ns=c(params$n1,params$n2,params$n3,params$n4)[c(P1,P2,P3)])

# for comparison, also get dcfs (Yang et al. 2012)
dcfs <- get.dcfs(base_counts=SFS[,c(P1,P2,P3)],
                 site_counts=SFS[,4],
                 Ns=c(params$n1,params$n2,params$n3)[c(P1,P2,P3)])

#plot both DFS and dcfs
par(mfrow = c(2,1))
plotDFS(dfs_data$DFS, dfs_data$weights)
plot(dcfs, type="b")

##################################################################################
################################# 4 pop model ##################################
##################################################################################

params <- list(n1 = 4, n2=10, n3=10, n4=10, #number of samples in each pop
               p2_T = 0.1,                  #length of two pop period (in 2N generations)
               p3_T = 0.1,                  #three pop period
               p3_N1 = 1, p3_N2 = 1, p3_N3 = 1,
               p3_m21 = 0, p3_m12 = 0, p3_m31 = 0, p3_m13 = 0,
               p41_T = 0.1,                 #first four pop period
               p41_N1 = 1, p41_N2 = 1, p41_N3 = 1, p41_N4 = 1,
               p41_m23 = 0, p41_m32 = 0, p41_m24 = 0, p41_m42 = 0, p41_m34 = 0, p41_m43 = 0,
               p42_T = 0.1,                 #second four pop period
               p42_N1 = 1, p42_N2 = 1, p42_N3 = 1, p42_N4 = 1,
               p42_m23 = 0,
               p42_m32 = 1,                 #introgression during second period
               p42_m24 = 0, p42_m42 = 0, p42_m34 = 0, p42_m43 = 0)

#population indices (remember: in moments the first split produced P2 from P1, and so on)
P1 <- 4
P2 <- 3
P3 <- 2
OG <- 1

#make command
command <- make_4pop_sim_command(params)

#run simulation
SFS <- read.table(text=system(command, intern=TRUE, ignore.stderr=FALSE))

# With four populations there are different ways to compute DFS.
# In this case we have a polarised SFS, so we can add the fourth population and
# compute as before. This way we also use sites that are polymorphic in the
# outgroup, scaling according to the frequency of the ancestral allele.
dfs_data_OG <- get.DFS(base_counts=SFS[,c(P1,P2,P3,OG)],
                       site_counts=SFS[,5],
                       Ns=c(params$n1,params$n2,params$n3,params$n4)[c(P1,P2,P3,OG)])

# BUGFIX: previously plotted dfs_data$weights (the weights of the unrelated
# 3-pop result above) against dfs_data_OG$DFS
plotDFS(dfs_data_OG$DFS, dfs_data_OG$weights)

# Alternatively, we can pretend that we don't know what the ancestral allele is
# (i.e. an unpolarised SFS). In this case we first polarise the counts, using
# the outgroup as a reference for what is ancestral. Sites that are polymorphic
# in the outgroup are ignored as ambiguous.
dfs_data_plr <- get.DFS(base_counts=polarize.counts(SFS[,-5],
                                                    Ns=c(params$n1,params$n2,params$n3,params$n4),
                                                    OGcolumn=OG,
                                                    outgroup_pol_to_NA=TRUE)[,c(P1,P2,P3,OG)],
                        site_counts=SFS[,5],
                        Ns=c(params$n1,params$n2,params$n3,params$n4)[c(P1,P2,P3,OG)])

plotDFS(dfs_data_plr$DFS, dfs_data_plr$weights)

##################################################################################
##################################################################################
############################  BATCH SIMULATIONS  ############################
##################################################################################
##################################################################################
# Below is code for simulating many thousands of combinations of parameters to explore parameter space.

# Like expand.grid, except that when there would be too many combinations you
# can restrict the number of parameters allowed to vary at any one time. You
# define groups of parameters and set how many per group may take a
# non-default value in any single run.
expand.grid.restricted <- function(defaults, groups, max_alt_per_group, alternatives){
    #first check that the number of groups provided matches the length of the value list
    if (length(defaults) != length(groups)){
        print("Mismatched lengths of inputs.")
        }
    #get unique list of groups
    GROUPS = unique(groups)
    if (length(GROUPS) != length(max_alt_per_group)){
        print("'max_alt_per_group' must have one value for each group.")
        return()
        }
    if (is.null(names(defaults))) names(defaults) <- 1:length(defaults)
    value_names <- names(defaults)
    #make lists of groups to vary at any given time
    groups_to_vary <- GROUPS[which(max_alt_per_group >= 1)]
    group_combos_to_vary <- list()
    for (x in 1:length(groups_to_vary)) group_combos_to_vary <- c(group_combos_to_vary, combn(groups_to_vary, x, simplify=F))
    #make lists of which variables will vary at any given time
    value_name_combos_to_vary_by_group <- list()
    for (group in GROUPS){
        value_name_combos_to_vary_by_group[[group]] <- list()
        if (max_alt_per_group[group] >= 1){
            for (x in 1:max_alt_per_group[group]){
                value_name_combos_to_vary_by_group[[group]] <- c(value_name_combos_to_vary_by_group[[group]],
                                                                 combn(value_names[which(groups==group)], x, simplify=F))
                }
            }
        }
    #now we know which groups will vary, and which values will vary within each group
    #now make the list of values to expand for each of these cases
    value_lists_to_expand <- list(as.list(defaults)) #the first is all defaults only
    n=1
    for (group_combo in group_combos_to_vary){
        # Pull out the names of the values to vary from each group.
        # There are multiple combinations of these.
        group_combo_value_combos <- expand.grid(lapply(value_name_combos_to_vary_by_group, function(x) 1:length(x))[group_combo])
        #and make a vector of the value names to vary
        for (i in 1:nrow(group_combo_value_combos)){
            current_value_names_to_vary <- unlist(lapply(1:length(group_combo),
                                                         function(j) value_name_combos_to_vary_by_group[[group_combo[j]]][[group_combo_value_combos[i,j]]]))
            #now make a list of defaults and then change those that need to be expanded
            current_value_list <- as.list(defaults)
            for (value_name in current_value_names_to_vary){
                current_value_list[[value_name]] <- alternatives[[value_name]]
                }
            n <- n+1
            value_lists_to_expand[[n]] <- current_value_list
            }
        }
    as.data.frame(rbindlist(lapply(value_lists_to_expand, expand.grid)))
    }

##################################################################################
################  set parameters (user to make changes here)  #################
##################################################################################

#set parameters common to all models
### NOTE in dadi, first split produces P2 from P1.
# Second split produces P3 from P2.
### Thus, for classic ABBA BABA, P1 and P3 are reversed (P1 is Neanderthal and P3 is Africa)
# NOTE: the order of entries in `defaults` matters - `groups` below maps to
# these by position, and the output column order follows it.
defaults <- list(n1 = 20, n2=20, n3=4, #number of samples in each pop
                 p2_T = 0.1,           #length of two pop period (in N generations)
                 p31_T = 0.1,          #first three pop period
                 p32_T = 0.1,          #second three pop period
                 p2_N12 = 1,           #pop size of the ancestor of P1 and P2
                 p2_N3 = 1,            #pop size of the ancestor of P3
                 p2_m12_3 = 0,         #migration
                 p2_m3_12 = 0,         #migration
                 p31_N1 = 1, p31_N2 = 1, p31_N3 = 1, #first three pop period sizes
                 p32_N1 = 1, p32_N2 = 1, p32_N3 = 1, #second three pop period sizes
                 p31_m12 = 0, p31_m21 = 0, p31_m23 = 0,
                 p31_m32 = 0, p31_m13 = 0, p31_m31 = 0, #first period migration
                 p32_m12 = 0, p32_m21 = 0, p32_m23 = 0,
                 p32_m32 = 0, p32_m13 = 0, p32_m31 = 0  #second period migration
                 )

#set alternatives for split times, population sizes and migration rates
alt_times <- c(0.2,0.5)
alt_Ns <- c(0.1,5)
alt_migs <- c(0.5,1,2,5)

# Alternatives are looked up by name, so their order is unimportant
alternatives <- list(n1 = NA, n2=NA, n3=NA, #sample sizes never vary
                     p2_T = alt_times,
                     p31_T = alt_times,
                     p32_T = alt_times,
                     p2_N12 = alt_Ns,
                     p2_N3 = alt_Ns,
                     p31_N1 = alt_Ns, p31_N2 = alt_Ns, p31_N3 = alt_Ns,
                     p32_N1 = alt_Ns, p32_N2 = alt_Ns, p32_N3 = alt_Ns,
                     p2_m12_3 = alt_migs,
                     p2_m3_12 = alt_migs,
                     p31_m12 = alt_migs, p31_m21 = alt_migs, p31_m23 = alt_migs,
                     p31_m32 = alt_migs, p31_m13 = alt_migs, p31_m31 = alt_migs,
                     p32_m12 = alt_migs, p32_m21 = alt_migs, p32_m23 = alt_migs,
                     p32_m32 = alt_migs, p32_m13 = alt_migs, p32_m31 = alt_migs
                     )

#use date as the name for this simulation run
params_name <- Sys.Date()

##################################################################################
#######################  prepare simulations  #############################
##################################################################################

### make parameter grids
# Here we make all combinations of parameters. expand.grid.restricted takes
# the defaults and alternatives (two lists of the same length and order), plus
# 'groups', which controls how many parameters may vary in each run. For
# example group seven has a maximum of 2 alternatives, so at most 2 of those
# parameters can differ from the defaults in a given run.
params_grid <- expand.grid.restricted(defaults=defaults,
                                      groups=c(1,2,3,                 #sample sizes
                                               4,5,6,                 #split times
                                               7,7,7,7,7,7,7,7,       #pop sizes
                                               7,7,7,7,7,7,7,7,7,7,7,7,7,7), #migration rates
                                      max_alt_per_group=c(0,0,0,1,1,1,2),
                                      alternatives=alternatives)

# make the simulation commands that will be fed to the python script
commands <- sapply(1:nrow(params_grid), function(i) make_3pop_sim_command(make_moments_params_3pop(params_grid[i,])))

##################################################################################
#########################  RUN SIMULATIONS  ######################################
##################################################################################

# All reps at once. This might take quite a bit of RAM, but the SFS is not too large.
freqs_text <- mclapply(commands, system, intern=TRUE, ignore.stderr=FALSE, mc.cores = 20)

#Read the text in as SFS tables
counts <- lapply(freqs_text, function(t) read.table(text=t))

##################################################################################
##############################  GET DFS  #########################################
##################################################################################

#get DFS directly from derived allele counts
# here we switch populations 1 and 3 because in moments the order of splitting is opposite to our model
dfs_data <- mclapply(1:length(counts),
                     function(i) get.DFS(base_counts=counts[[i]][,c(3,2,1)],
                                         site_counts=counts[[i]][,4],
                                         Ns=as.numeric(params_grid[i,c("n1","n2","n3")])),
                     mc.cores=20)

##################################################################################
#########################  Export Output  ########################################
##################################################################################

output <- data.frame(param_set=unlist(lapply(1:length(dfs_data), function(i) rep(i,nrow(dfs_data[[i]])))),
                     DFS=round(unlist(lapply(dfs_data, function(df) df$DFS)),4),
                     weights=round(unlist(lapply(dfs_data, function(df) df$weights)),4))

#DFS data all in one file
write.table(output, file = paste("sim_DFS_moments", params_name, "tsv", sep="."), quote=F, row.names=F, sep="\t")

#export parameter grid
write.table(cbind(data.frame(param_set=1:nrow(params_grid)),params_grid),
            file = paste("params", params_name, "tsv", sep="."), quote=F, row.names=F, col.names=T, sep="\t")
R
3D
simonhmartin/dfs
plot_DFS_from_SFS.R
.R
8,640
194
# Simon H. Martin 2020
# simon.martin@ed.ac.uk

################################ Start Here ####################################

# This script accompanies the paper:
# "Signatures of introgression across the allele frequency spectrum"
# by Simon H. Martin and William Amos

# Each section below computes and plots the D frequency spectrum (DFS)
# from a different empirical site frequency spectrum (SFS).

# The input SFS is provided in tabular format: a tab-delimited table in which
# the first three columns (or 4 in the case of a 4D SFS) give the allele count
# in each population (equivalent to the indices of the multidimensional SFS).
# The final column gives the corresponding number of sites.

# In most cases the input SFS is 3D, with the first three columns corresponding
# to populations P1, P2 and P3 (see the paper for details). This means that the
# SFS is polarized, and the outgroup is assumed to carry the ancestral allele
# at these sites.

# In the case of Heliconius butterflies, we provide an example of using a 4D
# SFS. In that case, the input SFS is not polarized. This means that the counts
# provided correspond to minor allele counts, and the fourth column gives the
# count in the outgroup. Before DFS can be computed from a 4D SFS, it must
# first be polarized, and sites at which the outgroup is polymorphic must be
# discarded.

#The functions to compute DFS and related statistics are provided in the accompanying script DFS.R
#First import these functions
source("DFS.R")

################################################################################
##############################  Arabidopsis  ####################################
################################################################################

### import the frequency spectrum
FS <- read.table("empirical_data/Arabidopsis/arn_lyr_72.DP5MIN58MAC2.lyrata2_lyrata4_arenosa4.sfs")

### get DFS
dfs_data <- get.DFS(base_counts=FS[,-4], #base counts are the first three columns (i.e. everything minus column 4)
                    site_counts=FS[,4],  #site counts are column 4
                    Ns = c(14,14,4))     #Ns provide the haploid sample sizes of each population (1 and 2 must always be equal)

### plot
png("images/DFS_arabidopsis.lyr2_lyr4_arn4.png", width = 2000, height = 1000, res=300)
par(mar=c(1,4,1,1))
plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", col_D="red", no_xlab=T)
dev.off()

### code for exporting a table of plotted values
# write.table(data.frame(D=round(dfs_data$DFS,4),
#                        weight=round(dfs_data$weights,4)),
#             file="empirical_data/Arabidopsis/DFS_arabidopsis.lyr2_lyr4_arn4.csv",
#             sep=",",row.names=FALSE,quote=FALSE)

################################################################################
###############################  Datepalms  ######################################
################################################################################

### import the frequency spectrum
FS <- read.table("empirical_data/datepalms/Flowers.SNPs.DP8.dactylifera_Iraq_dactylifera_Morocco_theophrasti.subsample10.sfs")

### get DFS
dfs_data <- get.DFS(base_counts=FS[,-4],
                    site_counts=FS[,4],
                    Ns = c(20,20,10))

### plot
png("images/DFS_Datepalms_dacIrq_dacMor_the.png", width = 2000, height = 1000, res=300)
par(mar=c(1,4,1,1))
plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", col_D="red", no_xlab=T)
dev.off()

# write.table(data.frame(D=round(dfs_data$DFS,4),
#                        weight=round(dfs_data$weights,4)),
#             file="empirical_data/datepalms/DFS_Datepalms_dacIrq_dacMor_the.csv",
#             sep=",",row.names=FALSE,quote=FALSE)

################################################################################
################################  sparrows  ####################################
################################################################################

### import the frequency spectrum
FS <- read.table("empirical_data/sparrows/Walsh2018_Evolution.RawSNPData.DP4.NEL_allo_NEL_sym_SAL_allo.subsample12.sfs")

### get DFS
dfs_data <- get.DFS(base_counts=FS[,-4],
                    site_counts=FS[,4],
                    Ns = c(12,12,4))

### plot
png("images/DFS_sparrows_NELallo_NELsym_SALallo.png", width = 2000, height = 1000, res=300)
par(mar=c(1,4,1,1))
plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", ylim=c(-0.25,0.25), col_D="red", no_xlab=T)
dev.off()

# write.table(data.frame(D=round(dfs_data$DFS,4),
#                        weight=round(dfs_data$weights,4)),
#             file="empirical_data/sparrows/DFS_sparrows_NELallo_NELsym_SALallo.csv",
#             sep=",",row.names=FALSE,quote=FALSE)

################################################################################
###############################  heliconius  ###################################
################################################################################

#Here we have multiple inputs with different populations, so we first define the population sets
pop_combos <- c("mpg_ama_txn_slv", "chi_txn_ama_slv", "mpg_ros_chi_slv", "flo_chi_ros_slv")

#for each combo, we compute DFS and plot
for (pops in pop_combos){
    #import frequency spectrum
    FS_heli <- read.table(paste0("empirical_data/Heliconius/bar92.DP8MIN92BIHET75.minor.",pops,".sfs"))

    # The SFS was not polarised when it was computed (i.e. it represents minor
    # allele counts), so we first polarise each SFS using the outgroup
    # (slv, column 5) before computing DFS.
    base_counts_heli <- polarize.counts(FS_heli[,-5], Ns=c(20,20,20,4))[,-4]

    ### get DFS
    dfs_data <- get.DFS(base_counts=base_counts_heli,
                        site_counts=FS_heli[,5],
                        Ns=c(20,20,20))

    ### plot
    png(paste0("images/DFS_Heliconius_",pops,".png"), width = 2000, height = 1000, res=300)
    par(mar=c(1,4,1,1))
    plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", col_D="red", no_xlab=T)
    dev.off()

    ### code to export a table of plotted values
    # write.table(data.frame(D=round(dfs_data$DFS,4),
    #                        weight=round(dfs_data$weights,4)),
    #             file=paste0("empirical_data/Heliconius/DFS_Heliconius_",pops,".csv"),
    #             sep=",",row.names=FALSE,quote=FALSE)
    }

################################################################################
##############################  sticklebacks  ##################################
################################################################################

### import the frequency spectrum
FS <- read.table("empirical_data/sticklebacks/groves2019.exclchr12.DP5.pun_sin_tym.subsample10.sfs")

### get DFS
# FIX: the original passed `site_counts=site_counts <- FS[,4]`, which
# accidentally assigned a stray global variable as a side effect, and supplied
# Ns positionally; both are cleaned up here (same values, same result).
dfs_data <- get.DFS(base_counts=FS[,-4],
                    site_counts=FS[,4],
                    Ns=c(10,10,10))

### plot
png("images/DFS_Stickelback_pun_sin_tym.png", width = 2000, height = 1000, res=300)
par(mar=c(1,4,1,1))
plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", col_D="red", no_xlab=T)
dev.off()

### code to export a table of plotted values
# write.table(data.frame(D=round(dfs_data$DFS,4),
#                        weight=round(dfs_data$weights,4)),
#             file="empirical_data/sticklebacks/DFS_Stickelback_pun_sin_tym.csv",
#             sep=",",row.names=FALSE,quote=FALSE)

################################################################################
#################################  humans  #####################################
################################################################################

### import the frequency spectrum
FS <- read.table("empirical_data/humans/chimp_1000G_DNK_ALT.chr1.GWD_GBR_nea.sample100.sfs")

### get DFS
# FIX: removed the accidental `site_counts <- FS[,4]` global assignment here too.
dfs_data <- get.DFS(base_counts=FS[,-4],
                    site_counts=FS[,4],
                    Ns = c(100,100,2))

# We can also compute related statistics
D <- get.D.from.base.counts(base_counts=FS[,-4], site_counts=FS[,4], Ns = c(100,100,2))    #overall D
f <- get.f.from.base.counts(base_counts=FS[,-4], site_counts=FS[,4], Ns = c(100,100,2))    #overall fraction of introgression
dcfs <- get.dcfs(base_counts=FS[,-4], site_counts=FS[,4], Ns = c(100,100,2))               #dcfs from Yang et al. 2012

### plot
png("images/chimp_1000G_DNK_ALT.chr1.GWD_GBR_nea.sample100.png", width = 2000, height = 1000, res=300)
par(mar=c(1,4,1,1))
plotDFS(dfs_data$DFS, dfs_data$weights, method="lines", col_D="red", no_xlab=T)
dev.off()

### code to export a table of plotted values
# write.table(data.frame(D=round(dfs_data$DFS,4),
#                        weight=round(dfs_data$weights,4)),
#             file="empirical_data/humans/DFS_chimp_1000G_DNK_ALT.chr1.GWD_GBR_nea.sample100.csv",
#             sep=",",row.names=FALSE,quote=FALSE)
R
3D
simonhmartin/dfs
SHINY_sim_DFS_moments/sim_SFS_moments.py
.py
8,851
198
#!/usr/bin/env python

# Simon H. Martin 2020
# simon.martin@ed.ac.uk

# This script accompanies the paper:
# "Signatures of introgression across the allele frequency spectrum"
# by Simon H. Martin and William Amos

# It is a wrapper for the moments package
# (Jouganous et al. 2017, https://doi.org/10.1534/genetics.117.200493).

# Simulation models are defined in periods.
# Each period can have a defined length and size for each population.
# "--onePopPeriod" refers to periods before any population split.
# If none of these are specified, it is assumed to have constant population size.
# "--twoPopPeriod" refers to periods after the first population split, and so on
# for "--threePopPeriod" and "--fourPopPeriod".
# Any number of periods can be specified.

# Output is a frequency spectrum in tabular format.
# The first N columns correspond to base counts in the N populations.
# The final column gives the proportion of sites with that combination of base counts.

import numpy as np
import itertools, argparse, sys
from collections import defaultdict


def moments_model(params, ns):
    """Build a site frequency spectrum with moments for a 1-4 population model.

    params is the tuple (onePopPeriods, twoPopPeriods, threePopPeriods,
    fourPopPeriods); each element is a list of per-period dicts as produced by
    getParamDict() (keys "T", "N"/"N1".., "m12".. — missing rates default to 0).
    ns gives the haploid sample size of each population.
    Returns a moments.Spectrum.
    """
    # moments is imported lazily so that the pure-Python helpers in this module
    # (addSimArgsToParser, getParamDict, sfsArrayToTable) can be imported by
    # other scripts without requiring moments to be installed.
    import moments

    onePopPeriods, twoPopPeriods, threePopPeriods, fourPopPeriods = params

    # steady state for the equilibrium ancestral population
    sts = moments.LinearSystem_1D.steady_state_1D(sum(ns))
    fs = moments.Spectrum(sts)

    # any changes in the ancestral population size are defined in the onePopPeriods
    for period in onePopPeriods:
        fs.integrate([period["N"]], period["T"])

    if len(twoPopPeriods) >= 1:
        # First split: pop 1 vs the ancestor of future pops 2, 3 and 4
        fs = moments.Manips.split_1D_to_2D(fs, ns[0], sum(ns[1:]))
        # any changes in the two resulting populations are defined in the twoPopPeriods
        for period in twoPopPeriods:
            fs.integrate([period["N1"], period["N2"]], period["T"],
                         m=np.array([[0,             period["m12"]],
                                     [period["m21"], 0            ]]))

    if len(threePopPeriods) >= 1:
        # Second split: pop 2 vs the ancestor of pops 3 and 4
        fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], sum(ns[2:]))
        # periods after the second split have three pops and six migration rates
        for period in threePopPeriods:
            fs.integrate([period["N1"], period["N2"], period["N3"]], period["T"],
                         m=np.array([[0,             period["m12"], period["m13"]],
                                     [period["m21"], 0,             period["m23"]],
                                     [period["m31"], period["m32"], 0            ]]))

    if len(fourPopPeriods) >= 1:
        # Third split: pop 3 vs pop 4
        fs = moments.Manips.split_3D_to_4D_3(fs, ns[2], ns[3])
        # periods after the third split have four pops and twelve migration rates
        for period in fourPopPeriods:
            fs.integrate([period["N1"], period["N2"], period["N3"], period["N4"]], period["T"],
                         m=np.array([[0,             period["m12"], period["m13"], period["m14"]],
                                     [period["m21"], 0,             period["m23"], period["m24"]],
                                     [period["m31"], period["m32"], 0,             period["m34"]],
                                     [period["m41"], period["m42"], period["m43"], 0            ]]))

    return fs


def sfsArrayToTable(sfs_array, excludeZero=True):
    """Flatten a masked SFS array into rows of [count_1, ..., count_N, value].

    Masked entries are always skipped; zero entries are skipped unless
    excludeZero is False.
    """
    indices = itertools.product(*[range(i) for i in sfs_array.shape])
    return [list(idx) + [sfs_array[idx]] for idx in indices
            if not (sfs_array.mask[idx] or (excludeZero and sfs_array[idx] == 0))]


def addSimArgsToParser(parser):
    """Add the simulation arguments to an argparse parser.

    Kept as a separate function so other scripts can import it and build
    compatible command lines.
    """
    parser.add_argument("--Nsam", help="Number of samples in each population", type=int, action="store", nargs="+")
    parser.add_argument("--onePopPeriod", help="Params for one pop period in format 'T=1 N=0.5'", nargs="+", action="append")
    parser.add_argument("--twoPopPeriod", help="Params for two pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action="append")
    parser.add_argument("--threePopPeriod", help="Params for three pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action="append")
    parser.add_argument("--fourPopPeriod", help="Params for four pop period in format 'T=1 N2=0.5 m12=0.01 etc'", nargs="+", action="append")


def _parse_periods(period_args, N_keys):
    """Parse raw 'key=value' period tokens into a list of per-period dicts.

    Each period becomes a defaultdict(int), so unspecified migration rates
    default to 0; the population sizes named in N_keys are forced to 1
    (i.e. the reference size) when left at 0.
    """
    periods = []
    if period_args is not None:
        for period in period_args:
            d = defaultdict(int)
            d.update(dict((key, float(value)) for key, value in
                          (item.split("=") for item in period)))
            # check N values: a size of 0 means "not specified" -> default to 1
            for N in N_keys:
                if d[N] == 0:
                    d[N] = 1
            periods.append(d)
    return periods


def getParamDict(args):
    """Convert parsed command-line args into the parameter dict used by moments_model().

    Returns {"Nsam": [...], "onePopPeriods": [...], "twoPopPeriods": [...],
    "threePopPeriods": [...], "fourPopPeriods": [...]}.
    """
    # The four phases differ only in which population-size keys they carry,
    # so the shared parsing lives in _parse_periods().
    return {"Nsam": args.Nsam,
            "onePopPeriods": _parse_periods(args.onePopPeriod, ("N",)),
            "twoPopPeriods": _parse_periods(args.twoPopPeriod, ("N1", "N2")),
            "threePopPeriods": _parse_periods(args.threePopPeriod, ("N1", "N2", "N3")),
            "fourPopPeriods": _parse_periods(args.fourPopPeriod, ("N1", "N2", "N3", "N4"))}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--commandLinesFile", help="File of multiple command lines", action="store")
    addSimArgsToParser(parser)

    args = parser.parse_args()

    # either run one simulation from the command line, or one per line of a file
    if args.commandLinesFile:
        with open(args.commandLinesFile, "rt") as commandLinesFile:
            argsList = [parser.parse_args(l.split()) for l in commandLinesFile]
    else:
        argsList = [args]

    for _args_ in argsList:
        paramDict = getParamDict(_args_)

        # report the model structure on stderr
        for phase, key in (("first", "onePopPeriods"), ("second", "twoPopPeriods"),
                           ("third", "threePopPeriods"), ("fourth", "fourPopPeriods")):
            sys.stderr.write("\n{} periods in {} phase\n".format(len(paramDict[key]), phase))
            for period in paramDict[key]:
                sys.stderr.write(" Length: {} x2N generations\n".format(period["T"]))

        ### run the simulation
        sfs = moments_model((paramDict["onePopPeriods"], paramDict["twoPopPeriods"],
                             paramDict["threePopPeriods"], paramDict["fourPopPeriods"]),
                            paramDict["Nsam"])

        # write the spectrum as a tab-separated table on stdout
        sfsTable = sfsArrayToTable(sfs.round(6))
        sys.stdout.write("\n".join(["\t".join([str(val) for val in lst]) for lst in sfsTable]) + "\n")
Python
3D
simonhmartin/dfs
SHINY_sim_DFS_moments/server.R
.R
13,039
274
# Server logic for the interactive DFS simulation app.
# Simulations are run by shelling out to the python wrapper sim_SFS_moments.py;
# the resulting site frequency spectrum is summarised and plotted with the
# functions defined in DFS.R.

source("DFS.R")

# Return y if x is NULL, otherwise x. Used to supply defaults for optional
# shiny inputs that may not exist.
if.null <- function(x,y){
    if (is.null(x)==TRUE) return(y)
    return(x)
    }

# Build the shell command for a three-population moments simulation:
# one two-population period (p2) followed by two three-population periods
# (p31 then p32). Unset population sizes default to 1 and unset migration
# rates default to 0. Simulator stderr is discarded via "2> /dev/null".
make_3pop_sim_command <- function(params){
    paste("python3", "sim_SFS_moments.py",
          "--Nsam", paste(params$n1,params$n2,params$n3),
          "--twoPopPeriod", paste0("T=",params$p2_T, #input is in units of 2N gen
                                   " N1=",if.null(params$p2_N1,1),
                                   " N2=",if.null(params$p2_N2,1),
                                   " m12=",if.null(params$p2_m12,0),
                                   " m21=",if.null(params$p2_m21,0)),
          "--threePopPeriod", paste0("T=",params$p31_T,
                                     " N1=",if.null(params$p31_N1,1),
                                     " N2=",if.null(params$p31_N2,1),
                                     " N3=",if.null(params$p31_N3,1),
                                     " m12=",if.null(params$p31_m12,0),
                                     " m21=",if.null(params$p31_m21,0),
                                     " m23=",if.null(params$p31_m23,0),
                                     " m32=",if.null(params$p31_m32,0),
                                     " m13=",if.null(params$p31_m13,0),
                                     " m31=",if.null(params$p31_m31,0)),
          "--threePopPeriod", paste0("T=",params$p32_T,
                                     " N1=",if.null(params$p32_N1,1),
                                     " N2=",if.null(params$p32_N2,1),
                                     " N3=",if.null(params$p32_N3,1),
                                     " m12=",if.null(params$p32_m12,0),
                                     " m21=",if.null(params$p32_m21,0),
                                     " m23=",if.null(params$p32_m23,0),
                                     " m32=",if.null(params$p32_m32,0),
                                     " m13=",if.null(params$p32_m13,0),
                                     " m31=",if.null(params$p32_m31,0)),
          " 2> /dev/null")
    }

# Draw a schematic of the demographic model: grey rectangles are population
# blocks through time (width = relative N, darker = more recent period) and
# arrows show migration (line width proportional to the rate).
# Values present in params (the shiny input object) override the matching
# defaults; p1_T (root period length) has no UI control and is passed directly.
plot.model <- function(params=NULL, p1_T=1, p2_T=1, p31_T=1, p32_T=1,
                       p1_N=1, p2_N12=1, p2_N3=1,
                       p31_N1=1, p31_N2=1, p31_N3=1,
                       p32_N1=1, p32_N2=1, p32_N3=1,
                       p2_m12_3=0, p2_m3_12=0,
                       p31_m12=0, p31_m21=0, p31_m13=0, p31_m31=0, p31_m23=0, p31_m32=0,
                       p32_m12=0, p32_m21=0, p32_m13=0, p32_m31=0, p32_m23=0, p32_m32=0,
                       label_periods=FALSE, arrow_col="black", reverse_populations=FALSE){
    # Pull numeric values from params where available; try() silently keeps the
    # default when the entry is missing or not coercible.
    try({p2_T=as.numeric(params$p2_T)})
    try({p31_T=as.numeric(params$p31_T)})
    try({p32_T=as.numeric(params$p32_T)})
    try({p2_N12=as.numeric(params$p2_N12)})
    try({p2_N3=as.numeric(params$p2_N3)})
    try({p31_N1=as.numeric(params$p31_N1)})
    try({p31_N2=as.numeric(params$p31_N2)})
    try({p31_N3=as.numeric(params$p31_N3)})
    try({p32_N1=as.numeric(params$p32_N1)})
    try({p32_N2=as.numeric(params$p32_N2)})
    try({p32_N3=as.numeric(params$p32_N3)})
    try({p2_m12_3=as.numeric(params$p2_m12_3)})
    try({p2_m3_12=as.numeric(params$p2_m3_12)})
    try({p31_m12=as.numeric(params$p31_m12)})
    try({p31_m21=as.numeric(params$p31_m21)})
    try({p31_m13=as.numeric(params$p31_m13)})
    try({p31_m31=as.numeric(params$p31_m31)})
    try({p31_m23=as.numeric(params$p31_m23)})
    try({p31_m32=as.numeric(params$p31_m32)})
    try({p32_m12=as.numeric(params$p32_m12)})
    try({p32_m21=as.numeric(params$p32_m21)})
    try({p32_m13=as.numeric(params$p32_m13)})
    try({p32_m31=as.numeric(params$p32_m31)})
    try({p32_m23=as.numeric(params$p32_m23)})
    try({p32_m32=as.numeric(params$p32_m32)})

    # widest population determines horizontal spacing between population midlines
    maxN <- max(p1_N, p2_N12, p2_N3, p31_N1, p31_N2, p31_N3, p32_N1, p32_N2, p32_N3)
    dist <- maxN+1
    # x midpoints: three terminal populations, the two period-2 blocks, the root
    p3_mid <- c(0, dist, 2*dist)
    p2_mid <- c(sum(p3_mid[1:2])/2, p3_mid[3])
    p1_mid <- sum(p2_mid)/2
    if (reverse_populations==FALSE){
        left <- min(c(p1_mid, p2_mid, p3_mid)) - 0.5*maxN
        right <- max(c(p1_mid, p2_mid, p3_mid)) + 0.5*maxN
        } else{
        # mirror the x axis so population 3 is drawn on the left
        right <- min(c(p1_mid, p2_mid, p3_mid)) - 0.5*maxN
        left <- max(c(p1_mid, p2_mid, p3_mid)) + 0.5*maxN
        }
    # cumulative period boundaries, measured back in time from the present
    levels <- cumsum(rev(c(p1_T, p2_T, p31_T, p32_T)))
    top <- max(levels)

    plot(0, cex=0, ylim = c(0, top), xlim = c(left,right), bty="n", xaxt="n",
         ylab = "Generations ago (units of 2N)", xlab = "")

    # population blocks for each period, most recent at the bottom
    rect(p3_mid - c(p32_N1, p32_N2, p32_N3)/2, 0,         p3_mid + c(p32_N1, p32_N2, p32_N3)/2, levels[1], col="gray50", border=NA)
    rect(p3_mid - c(p31_N1, p31_N2, p31_N3)/2, levels[1], p3_mid + c(p31_N1, p31_N2, p31_N3)/2, levels[2], col="gray60", border=NA)
    rect(p2_mid - c(p2_N12, p2_N3)/2,          levels[2], p2_mid + c(p2_N12, p2_N3)/2,          levels[3], col="gray70", border=NA)
    rect(p1_mid - p1_N/2,                      levels[3], p1_mid + p1_N/2,                      levels[4], col="gray80", border=NA)

    # horizontal connectors at the two split times
    segments(p3_mid[1] - p31_N1/2, levels[2], p3_mid[2] + p31_N2/2, levels[2], col="gray70")
    segments(p2_mid[1] - p2_N12/2, levels[3], p2_mid[2] + p2_N3/2, levels[3], col="gray80")

    # migration arrows; different heights within a period keep opposing
    # directions from overlapping
    if (p2_m12_3 > 0) arrows(p2_mid[2]-p2_N3/2, levels[2]+p2_T*.33, p2_mid[1]+p2_N12/2, levels[2]+p2_T*.33, lwd=p2_m12_3, col=arrow_col)
    if (p2_m3_12 > 0) arrows(p2_mid[1]+p2_N12/2, levels[2]+p2_T*.66, p2_mid[2]-p2_N3/2, levels[2]+p2_T*.66, lwd=p2_m3_12, col=arrow_col)

    if (p31_m23 > 0) arrows(p3_mid[3]-p31_N3/2, levels[1]+p31_T*.33, p3_mid[2]+p31_N2/2, levels[1]+p31_T*.33, lwd=p31_m23, col=arrow_col)
    if (p31_m32 > 0) arrows(p3_mid[2]+p31_N2/2, levels[1]+p31_T*.66, p3_mid[3]-p31_N3/2, levels[1]+p31_T*.66, lwd=p31_m32, col=arrow_col)
    if (p31_m13 > 0) arrows(p3_mid[3]-p31_N3/2, levels[1]+p31_T*.33, p3_mid[1]+p31_N1/2, levels[1]+p31_T*.33, lwd=p31_m13, col=arrow_col)
    if (p31_m31 > 0) arrows(p3_mid[1]+p31_N1/2, levels[1]+p31_T*.66, p3_mid[3]-p31_N3/2, levels[1]+p31_T*.66, lwd=p31_m31, col=arrow_col)
    if (p31_m12 > 0) arrows(p3_mid[2]-p31_N2/2, levels[1]+p31_T*.45, p3_mid[1]+p31_N1/2, levels[1]+p31_T*.45, lwd=p31_m12, col=arrow_col)
    if (p31_m21 > 0) arrows(p3_mid[1]+p31_N1/2, levels[1]+p31_T*.55, p3_mid[2]-p31_N2/2, levels[1]+p31_T*.55, lwd=p31_m21, col=arrow_col)

    if (p32_m23 > 0) arrows(p3_mid[3]-p32_N3/2, p32_T*.33, p3_mid[2]+p32_N2/2, p32_T*.33, lwd=p32_m23, col=arrow_col)
    if (p32_m32 > 0) arrows(p3_mid[2]+p32_N2/2, p32_T*.66, p3_mid[3]-p32_N3/2, p32_T*.66, lwd=p32_m32, col=arrow_col)
    if (p32_m13 > 0) arrows(p3_mid[3]-p32_N3/2, p32_T*.33, p3_mid[1]+p32_N1/2, p32_T*.33, lwd=p32_m13, col=arrow_col)
    if (p32_m31 > 0) arrows(p3_mid[1]+p32_N1/2, p32_T*.66, p3_mid[3]-p32_N3/2, p32_T*.66, lwd=p32_m31, col=arrow_col)
    if (p32_m12 > 0) arrows(p3_mid[2]-p32_N2/2, p32_T*.45, p3_mid[1]+p32_N1/2, p32_T*.45, lwd=p32_m12, col=arrow_col)
    if (p32_m21 > 0) arrows(p3_mid[1]+p32_N1/2, p32_T*.55, p3_mid[2]-p32_N2/2, p32_T*.55, lwd=p32_m21, col=arrow_col)

    # population labels along the bottom axis
    if (reverse_populations == FALSE) mtext(1, at=p3_mid, text=1:3)
    else mtext(1, at=p3_mid, text=3:1)

    if(label_periods==TRUE){
        mtext(4,at=c(p32_T*.5, levels[1]+p31_T*.5, levels[2]+p2_T*.5),
              text=c("Period 3.2", "Period 3.1", "Period 2"), las=2)
        }
    }

# Translate shiny inputs into parameters for make_3pop_sim_command() and run it.
# Returns the simulated SFS as a data frame (one row per base-count combination,
# final column the proportion of sites).
run.simulation <- function(input){
    #convert population numbers for dadi. 3 becomes 1 and 1 becomes 3
    # (the simulator splits pop 1 off first, whereas in the UI pop 3 is the
    # first-diverging population, so sizes and rates are swapped accordingly)
    moments_params <- list(n1 = ifelse(input$swap13==FALSE, input$n3, input$n1_2),
                           n2 = input$n1_2,
                           n3 = ifelse(input$swap13==FALSE, input$n1_2, input$n3), #number of samples in each pop
                           p2_T = input$p2_T, #length of the two pop period (in 2N generations)
                           p2_m21 = input$p2_m12_3, # two pop period: migration into pop12 from pop3 (swapped)
                           p2_m12 = input$p2_m3_12, # two pop period: migration into pop3 from pop12 (swapped)
                           p2_N1 = input$p2_N3,  # two pop period sizes (swapped)
                           p2_N2 = input$p2_N12,
                           p31_T = input$p31_T, #first three pop period length
                           p31_N1 = input$p31_N3, # first three pop period sizes (1 and 3 swapped)
                           p31_N2 = input$p31_N2,
                           p31_N3 = input$p31_N1,
                           p31_m21 = input$p31_m23, # first three pop period migration rates
                           p31_m12 = input$p31_m32, # (each rate mapped to its swapped counterpart)
                           p31_m31 = input$p31_m13,
                           p31_m13 = input$p31_m31,
                           p31_m23 = input$p31_m21,
                           p31_m32 = input$p31_m12,
                           p32_T = input$p32_T, #second three pop period length
                           p32_N1 = input$p32_N3, # second three pop period sizes (1 and 3 swapped)
                           p32_N2 = input$p32_N2,
                           p32_N3 = input$p32_N1,
                           p32_m21 = input$p32_m23, # second three pop period migration rates
                           p32_m12 = input$p32_m32, # (each rate mapped to its swapped counterpart)
                           p32_m31 = input$p32_m13,
                           p32_m13 = input$p32_m31,
                           p32_m23 = input$p32_m21,
                           p32_m32 = input$p32_m12
                           )

    command <- make_3pop_sim_command(moments_params)
    print(command)
    # run the simulator and parse its tabular stdout
    read.table(text=system(command, intern=TRUE,ignore.stderr=FALSE))
    }

shinyServer(
    function(input, output, session){

        # holds the latest simulated SFS and derived statistics across outputs
        reactive_data <- reactiveValues()

        # schematic of the current model
        output$model_plot <- renderPlot({
            par(mar=c(1,15,1,15))
            # NOTE(review): mean() here only averages its FIRST argument — the
            # second and third values are consumed by mean's trim/na.rm
            # parameters. mean(c(...)) was probably intended; behaviour left
            # unchanged pending confirmation.
            plot.model(params=input,
                       p1_T=mean(as.numeric(input$p2_T), as.numeric(input$p31_T), as.numeric(input$p32_T))/2,
                       label_periods=TRUE, arrow_col="red", reverse_populations=input$swap13)
            }, height=300)

        # run the simulation (on "go") and plot the resulting DFS
        output$DFS_plot <- renderPlot({
            get_SFS <- eventReactive(input$go, {run.simulation(input)})
            reactive_data$SFS <- get_SFS()
            par(mar=c(1.2,6,2,4))
            if (is.null(reactive_data$SFS) == FALSE){
                # which SFS column is P1 / P3 depends on the pop 1<->3 swap
                if (input$swap13 == FALSE){
                    reactive_data$P1 <- 3
                    reactive_data$P3 <- 1
                    } else {
                    reactive_data$P1 <- 1
                    reactive_data$P3 <- 3
                    }
                reactive_data$dfs_data <- get.DFS(base_counts=reactive_data$SFS[,c(reactive_data$P1,2,reactive_data$P3)],
                                                  site_counts=reactive_data$SFS[,4],
                                                  Ns=as.numeric(c(input$n1_2,input$n1_2,input$n3)))
                plotDFS(reactive_data$dfs_data$DFS, reactive_data$dfs_data$weights, col_D="red")
                }
            }, height=300)

        # optional dcfs (Yang et al. 2012) plot for the same simulated SFS
        output$dcfs_plot <- renderPlot({
            if (is.null(reactive_data$SFS) == FALSE){
                if (input$dcfs == TRUE){
                    reactive_data$dcfs <- get.dcfs(base_counts=reactive_data$SFS[,c(reactive_data$P1,2,reactive_data$P3)],
                                                   site_counts=reactive_data$SFS[,4],
                                                   Ns=as.numeric(c(input$n1_2,input$n1_2,input$n3)))
                    par(mar=c(1,6,2,4))
                    plot.dcfs(reactive_data$dcfs)
                    }
                }
            }, height=300)

        # export model schematic, DFS plot, plotted values and input parameters
        observeEvent(input$save_files, {
            pdf(paste0(input$prefix,".model.pdf"), width=5,height=5)
            par(mar=c(1,5,1,1))
            plot.model(params=input,
                       p1_T=mean(as.numeric(input$p2_T), as.numeric(input$p31_T), as.numeric(input$p32_T))/2,
                       arrow_col="red", reverse_populations=input$swap13)
            dev.off()

            png(paste0(input$prefix,".model.png"), width = 1000, height = 800, res=300)
            par(mar=c(1,5,1,1))
            plot.model(params=input,
                       p1_T=mean(as.numeric(input$p2_T), as.numeric(input$p31_T), as.numeric(input$p32_T))/2,
                       arrow_col="red", reverse_populations=input$swap13)
            dev.off()

            pdf(paste0(input$prefix,".pdf"), width=10,height=4)
            par(mar=c(1.2,4,1,4))
            plotDFS(reactive_data$dfs_data$DFS, reactive_data$dfs_data$weights, col_D="red")
            dev.off()

            png(paste0(input$prefix,".png"), width = 2000, height = 800, res=300)
            par(mar=c(1.2,4,1,4))
            plotDFS(reactive_data$dfs_data$DFS, reactive_data$dfs_data$weights, col_D="red")
            dev.off()

            write.table(data.frame(D=round(reactive_data$dfs_data$DFS,4),
                                   weight=round(reactive_data$dfs_data$weights,4)),
                        file=paste0(input$prefix,".csv"),
                        sep=",",row.names=FALSE,quote=FALSE)

            write.table(reactiveValuesToList(input), file=paste0(input$prefix,".params.csv"),
                        quote=F, row.names=F, sep=",")
            })
        }
    )
R
3D
simonhmartin/dfs
SHINY_sim_DFS_moments/DFS.R
.R
13,879
308
# Simon H. Martin 2020
# simon.martin@ed.ac.uk

# This script accompanies the paper:
# "Signatures of introgression across the allele frequency spectrum"
# by Simon H. Martin and William Amos

# It contains functions for computing and plotting the D frequency spectrum
# and related statistics from an input site frequency spectrum (SFS).

# To accommodate large, sparse frequency spectra, we represent the SFS in two parts.
# First is the BASE_COUNTS: a matrix with three columns corresponding to populations P1, P2 and P3.
# An optional fourth column for the outgroup can also be provided, if the SFS is unpolarised.
# Second is the SITE_COUNTS: a vector with length corresponding to the number of rows
# in the base_counts matrix, giving the number of sites corresponding to each combination of counts.
# The SFS may be sparse: not all combinations of counts need to be provided if there are
# zero sites with a given combination.

# Most functions also require the haploid sample sizes (Ns) of the three (or four) populations as input.
# If these are not provided, they are inferred from the highest value in the base_counts columns.
# This is not recommended for an empirical SFS, because high-frequency combinations are
# typically rare and may be absent.

################################################################################

# Compute the D frequency spectrum: D partitioned by derived allele count
# (1..N) in P2 (for ABBA) and in P1 (for BABA). Requires Ns[1] == Ns[2].
# Returns a data frame with one row per count: DFS, weights (proportion of
# total ABBA+BABA signal), ABBA and BABA sums.
get.DFS <- function(base_counts, site_counts=NULL, Ns=NULL){
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)

    #site counts are used to compress the input data. They give the number of sites observed with those base counts
    #if not specified, assume all patterns are represented once
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))

    # three columns (polarised) or four (with outgroup); P1 and P2 sample sizes must match
    if (!(((ncol(base_counts) == 4 & length(Ns) ==4) | (ncol(base_counts) == 3 & length(Ns) == 3)) & Ns[1] == Ns[2])){
        print("Incorrect specification")
        return()
        }

    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    N = Ns[1]

    #identify sites where P1 and P2 each have a specified allele frequency
    idx_1 = lapply(1:N, function(i) which(base_counts[,1] == i))
    idx_2 = lapply(1:N, function(i) which(base_counts[,2] == i))

    #get total counts of each pattern, weighted by site counts
    pattern_sums_by_count_1 <- sapply(1:N, function(i) apply(get.patterns(freqs[idx_1[[i]],1], freqs[idx_1[[i]],2], freqs[idx_1[[i]],3], freqs[idx_1[[i]],4]) * site_counts[idx_1[[i]]],2,sum))

    pattern_sums_by_count_2 <- sapply(1:N, function(i) apply(get.patterns(freqs[idx_2[[i]],1], freqs[idx_2[[i]],2], freqs[idx_2[[i]],3], freqs[idx_2[[i]],4]) * site_counts[idx_2[[i]]],2,sum))

    # ABBA is binned by the derived count in P2, BABA by the derived count in P1
    ABBA_by_count <- pattern_sums_by_count_2["ABBA",]
    BABA_by_count <- pattern_sums_by_count_1["BABA",]

    DFS <- (ABBA_by_count - BABA_by_count) / (ABBA_by_count + BABA_by_count)

    weights <- (ABBA_by_count + BABA_by_count)/ sum((ABBA_by_count + BABA_by_count))

    data.frame(DFS=DFS, weights=weights, ABBA=ABBA_by_count, BABA=BABA_by_count)
    }

# Compute the overall D statistic from base counts. With full=TRUE, also count
# patterns where the outgroup carries the derived allele (ABBA_BAAB/BABA_ABAB).
get.D.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL, full=FALSE){
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)

    #site counts are used to compress the input data. They give the number of sites observed with those base counts
    #if not specified, assume all patterns are represented once
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))

    if (!((ncol(base_counts) == 4 & length(Ns) ==4) | (ncol(base_counts) == 3 & length(Ns) == 3)) ){
        print("Incorrect specification")
        return()
        }

    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    idx <- 1:nrow(freqs)

    #get total counts of each pattern
    pattern_sums <- apply(get.patterns(freqs[idx,1],freqs[idx,2],freqs[idx,3],freqs[idx,4]) * site_counts,2,sum)

    if (full == FALSE){
        ABBA <- pattern_sums["ABBA"]
        BABA <- pattern_sums["BABA"]
        return(as.numeric((ABBA - BABA) / (ABBA + BABA)))
        } else{
        ABBA_BAAB <- pattern_sums["ABBA_BAAB"]
        BABA_ABAB <- pattern_sums["BABA_ABAB"]
        return(as.numeric((ABBA_BAAB - BABA_ABAB) / (ABBA_BAAB + BABA_ABAB)))
        }
    }

# Function to compute the overall f statistic (Green et al. 2010).
# Assumes that P3 is the donor population and P2 the only recipient.
# If this is false, the value will be misleading.
get.f.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL, full=FALSE){
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) {
        print("WARNING: Ns not provided, assuming maximum count observed per population is N haploid samples")
        Ns <- apply(base_counts, 2, max)
        }

    #site counts are used to compress the input data. They give the number of sites observed with those base counts
    #if not specified, assume all patterns are represented once
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))

    if (!((ncol(base_counts) == 4 & length(Ns) ==4) | (ncol(base_counts) == 3 & length(Ns) == 3)) ){
        print("Incorrect specification")
        return()
        }

    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    idx <- 1:nrow(freqs)

    #get total counts of each pattern
    # the *_f denominators substitute P3 for P2 (complete-introgression expectation)
    pattern_sums <- apply(get.patterns(freqs[idx,1],freqs[idx,2],freqs[idx,3],freqs[idx,4]) * site_counts,2,sum)

    if (full == FALSE){
        ABBA <- pattern_sums["ABBA"]
        BABA <- pattern_sums["BABA"]
        ABBA_f <- pattern_sums["ABBA_f"]
        BABA_f <- pattern_sums["BABA_f"]
        return(as.numeric((ABBA - BABA) / (ABBA_f - BABA_f)))
        } else{
        ABBA_BAAB <- pattern_sums["ABBA_BAAB"]
        BABA_ABAB <- pattern_sums["BABA_ABAB"]
        ABBA_BAAB_f <- pattern_sums["ABBA_BAAB_f"]
        BABA_ABAB_f <- pattern_sums["BABA_ABAB_f"]
        return(as.numeric((ABBA_BAAB - BABA_ABAB) / (ABBA_BAAB_f - BABA_ABAB_f)))
        }
    }

# Function to compute the f4 statistic (Patterson et al. 2012):
# the (site-count weighted) mean of (p1-p2)*(p3-p4). Requires four populations.
get.f4.from.base.counts <- function(base_counts, site_counts=NULL, Ns=NULL) {
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)

    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    f4_by_site <- (freqs[,1]-freqs[,2])*(freqs[,3]-freqs[,4])

    if(is.null(site_counts) == TRUE) return(mean(f4_by_site)) else weighted.mean(f4_by_site,site_counts)
    }

# Function to compute the doubly-conditioned frequency spectrum (Yang et al. 2012):
# the distribution of derived counts in P2, conditioned on ancestral in P1 and
# derived in P3. Input must be polarised (three columns only).
get.dcfs <- function(base_counts, site_counts=NULL, Ns=NULL){
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)

    #site counts are used to compress the input data. They give the number of sites observed with those base counts
    #if not specified, assume all patterns are represented once
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))

    #assert that base counts are for three populations (they must be polarized)
    if (ncol(base_counts) != 3 | length(Ns) != 3){
        print("Incorrect specification")
        return()
        }

    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    N = Ns[2]

    #identify sites where P2 has a specified allele frequency
    indices = lapply(1:N, function(i) which(base_counts[,2] == i))

    #for each frequency in P2, multiply by probability of getting an ancestral allele in P1 and derived in P3, and sum site counts
    dcfs_unscaled <- sapply(indices, function(idx) sum((1-freqs[idx,1]) * freqs[idx,3] * site_counts[idx]))

    # normalise to proportions
    dcfs_unscaled/sum(dcfs_unscaled)
    }

# An experimental function to compute a 2-dimensional D frequency spectrum:
# D partitioned jointly by the derived count in P1 (rows) and P2 (columns).
get.DFS2D <- function(base_counts, site_counts=NULL, Ns=NULL){
    #if the number of haplotypes per population is not specified, assume it is the maximum value observed - NOT RECOMMENDED
    if (is.null(Ns) == TRUE) Ns <- apply(base_counts, 2, max)

    #site counts are used to compress the input data. They give the number of sites observed with those base counts
    #if not specified, assume all patterns are represented once
    if (is.null(site_counts) == TRUE) site_counts <- rep(1, nrow(base_counts))

    if (!(((ncol(base_counts) == 4 & length(Ns) ==4) | (ncol(base_counts) == 3 & length(Ns) == 3)) & Ns[1] == Ns[2])){
        print("Incorrect specification")
        return()
        }

    #convert base_counts to frequencies
    freqs = base_counts / t(replicate(nrow(base_counts), Ns))

    #sum ABBA and BABA for each combination of counts in P1 and P2
    pattern_sums_array <- array(dim=c(Ns[1],Ns[2],2), dimnames = list(1:Ns[1], 1:Ns[2], c("ABBA", "BABA")))

    for (i in 1:Ns[1]){
        for (j in 1:Ns[2]){
            idx = which(base_counts[,1] == i & base_counts[,2] == j)
            pattern_sums_array[i,j,] <- apply(get.patterns(freqs[idx,1], freqs[idx,2], freqs[idx,3], freqs[idx,4])[,c("ABBA","BABA")] * site_counts[idx],2,sum)
            }
        }

    DFS <- (pattern_sums_array[,,"ABBA"] - pattern_sums_array[,,"BABA"]) / (pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"])

    weights <- (pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"])/ sum((pattern_sums_array[,,"ABBA"] + pattern_sums_array[,,"BABA"]))

    list(DFS2D=DFS, weights=weights)
    }

# Function to polarize counts for an unpolarized SFS using an outgroup column.
# Rows where the outgroup is polymorphic are set to NA (unless
# outgroup_pol_to_NA=FALSE); rows where the outgroup mostly carries the
# counted allele are flipped (count -> N - count) in every column.
polarize.counts <- function(counts, Ns, OGcolumn=NULL, outgroup_pol_to_NA=TRUE){
    #If outgroup column is not specified, assume it is the last one
    OG <- ifelse(is.null(OGcolumn) == TRUE, ncol(counts), OGcolumn)

    counts_plr <- counts

    if (outgroup_pol_to_NA == TRUE){
        counts_plr[counts_plr[,OG]!=0 & counts_plr[,OG]!=Ns[OG],] <- NA
        }

    #flip sites where numbers must be flipped
    # (NA rows are ignored by which(); the unmodified `counts` supplies the values to flip)
    flip_idx = which(counts_plr[,OG] > Ns[OG]/2)

    counts_plr[flip_idx,] <- t(apply(counts[flip_idx,], 1, function(x) Ns-x))

    counts_plr
    }

# Compute per-site expected weights of ABBA/BABA-type patterns from allele
# frequencies in P1, P2, P3 and (optionally) the outgroup. The *_f columns
# substitute P3 for P2, giving the denominators of the f estimator; the
# *_BAAB/_ABAB columns additionally count sites where the outgroup is derived.
get.patterns <- function(p1, p2, p3, pO=NULL){
    #assume that if there's no outgroup, it is always fixed ancestral
    if (is.null(pO)==TRUE | length(pO) == 0) pO <- 0

    data.frame(ABBA = (1 - p1) * p2 * p3 * (1-pO),
               BABA = p1 * (1 - p2) * p3 * (1-pO),
               ABBA_BAAB = (1 - p1) * p2 * p3 * (1-pO) + p1 * (1-p2) * (1-p3) * pO,
               BABA_ABAB = p1 * (1 - p2) * p3 * (1-pO) + (1-p1) * p2 * (1-p3) * pO,
               ABBA_f = (1 - p1) * p3 * p3 * (1-pO),
               BABA_f = p1 * (1-p3) * p3 * (1-pO),
               ABBA_BAAB_f = (1 - p1) * p3 * p3 * (1-pO) + p1 * (1-p3)**2 * pO,
               BABA_ABAB_f = p1 * (1 - p3) * p3 * (1-pO) + (1-p1) * p3 * (1-p3) * pO
               )
    }

# Most functions here use a table format for the SFS, which is more suitable
# for a large, sparse array.
# This function converts to the more conventional NxN(xN...) array
# (indices are counts + 1; unlisted combinations remain NA).
sfs.table.to.array <- function(sfs_table, dims=NULL, count_col=NULL){
    # infer dimensions from the maximum count in each column if not given
    if (is.null(dims)==TRUE) dims <- apply(sfs_table[,-ncol(sfs_table)], 2, max) + 1
    ndim <- length(dims)
    # the site-count column defaults to the one after the count columns
    if (is.null(count_col)==TRUE) count_col <- ndim+1
    arr <- array(dim=dims)
    for (i in 1:nrow(sfs_table)){
        arr[as.matrix(sfs_table[i,-count_col])+1] <- sfs_table[i,count_col]
        }
    arr
    }

################################################ plotting functions

# Plot a D frequency spectrum.
# method="lines": vertical segments whose width reflects the weights;
# method="bars"/"scaled_bars": barplot shaded or scaled by the weights.
# show_D adds a dashed line at the weighted overall D; add=TRUE overlays
# on an existing plot.
plotDFS <- function(DFS, weights, method="lines", ylim=c(-1,1), show_D=TRUE, col="black", col_D="black",
                    width_scale=100, no_xlab=FALSE, add=FALSE){
    if (method == "lines"){
        N = length(DFS)
        if (add == FALSE){
            plot(0, xlim = c(1,N), ylim = ylim, cex=0, xlab = "", ylab = "", xaxt="n", bty="n")
            abline(h=0)
            }
        # segment line width is proportional to each class's weight
        segments(1:N, 0, 1:N, DFS, lwd = width_scale*weights, lend=1, col=col)
        }

    if (method == "bars") barplot(DFS, col= rgb(0,0,0,weights), ylim = ylim, add=add)

    if (method == "scaled_bars") barplot(DFS*weights, ylim = ylim, add=add)

    if (no_xlab == FALSE & add == FALSE) mtext(1,text="Derived allele frequency", line = 0)

    if (add == FALSE) mtext(2,text=expression(italic("D")), line = 2.8, las=2)

    # dashed line at the overall (weighted mean) D
    if (show_D == TRUE) abline(h= sum(DFS * weights), lty = 2, col=col_D)
    }

# Simple line-and-point plot of a dcfs vector
plot.dcfs <- function(dcfs){
    plot(dcfs, type="b")
    }
R
3D
simonhmartin/dfs
SHINY_sim_DFS_moments/ui.R
.R
6,117
101
library(shiny) library(shinyWidgets) fluidPage( titlePanel("Simulated DFS"), fluidRow( column(2, wellPanel( h2("Add Migration"), h3("Period 2"), checkboxInput("mig_p2", NULL), conditionalPanel(condition = "input.mig_p2 == true", shinyWidgets::sliderTextInput("p2_m12_3", label = "pop12 <- pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p2_m3_12", label = "pop12 -> pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F) ), h3("Period 3.1"), checkboxInput("mig_p31", NULL), conditionalPanel(condition = "input.mig_p31 == true", shinyWidgets::sliderTextInput("p31_m23", label = "pop2 <- pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p31_m32", label = "pop2 -> pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p31_m13", label = "pop1 <- pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p31_m31", label = "pop1 -> pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p31_m12", label = "pop1 <- pop2", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p31_m21", label = "pop1 -> pop2", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F) ), h3("Period 3.2"), checkboxInput("mig_p32", NULL), conditionalPanel(condition = "input.mig_p32 == true", shinyWidgets::sliderTextInput("p32_m23", label = "pop2 <- pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p32_m32", label = "pop2 -> pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p32_m13", label = "pop1 <- pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p32_m31", label = "pop1 -> pop3", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p32_m12", label = "pop1 
<- pop2", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F), shinyWidgets::sliderTextInput("p32_m21", label = "pop1 -> pop2", choices = c(seq(0,1,0.1),seq(2,10,1)), selected=0, grid=F) ) ) ), column(2, wellPanel( h2("Modify Population Sizes (relative)"), h3("Period 2"), checkboxInput("mod_N_p2", NULL), conditionalPanel(condition = "input.mod_N_p2 == true", shinyWidgets::sliderTextInput("p2_N12", label = "Pop 1,2 ancestor", choices = c(0.1,0.5,1,5,10), selected=1, grid=F), shinyWidgets::sliderTextInput("p2_N3", label = "Pop 3", choices = c(0.1,0.5,1,5,10), selected=1, grid=F) ), h3("Period 3.1"), checkboxInput("mod_N_p31", NULL), conditionalPanel(condition = "input.mod_N_p31 == true", shinyWidgets::sliderTextInput("p31_N1", label = "Pop 1", choices = c(0.1,0.5,1,5,10), selected=1, grid=F), shinyWidgets::sliderTextInput("p31_N2", label = "Pop 2", choices = c(0.1,0.5,1,5,10), selected=1, grid=F), shinyWidgets::sliderTextInput("p31_N3", label = "Pop 3", choices = c(0.1,0.5,1,5,10), selected=1, grid=F) ), h3("Period 3.2"), checkboxInput("mod_N_p32", NULL), conditionalPanel(condition = "input.mod_N_p32 == true", shinyWidgets::sliderTextInput("p32_N1", label = "Pop 1", choices = c(0.1,0.5,1,5,10), selected=1, grid=F), shinyWidgets::sliderTextInput("p32_N2", label = "Pop 2", choices = c(0.1,0.5,1,5,10), selected=1, grid=F), shinyWidgets::sliderTextInput("p32_N3", label = "Pop 3", choices = c(0.1,0.5,1,5,10), selected=1, grid=F) ) ) ), column(2, wellPanel( h2("Times (2N generations)"), shinyWidgets::sliderTextInput("p2_T", label = "Period 2", choices = c(0,seq(0.01,0.1,0.01),seq(0.2,1,0.1),seq(2,10,1)), selected=0.1, grid=F), shinyWidgets::sliderTextInput("p31_T", label = "Period 3.1", choices = c(0,seq(0.01,0.1,0.01),seq(0.2,1,0.1),seq(2,10,1)), selected=0.1, grid=F), shinyWidgets::sliderTextInput("p32_T", label = "Period 3.2", choices = c(0,seq(0.01,0.1,0.01),seq(0.2,1,0.1),seq(2,10,1)), selected=0.1, grid=F) ), wellPanel( h2("Sampling"), numericInput("n1_2", 
label = "Samples from pops 1 & 2", value = 30), numericInput("n3", label = "Samples from pop 3", value = 10), checkboxInput("swap13", label = "Swap pops 1 and 3") ) ), column(6, plotOutput("model_plot"), actionButton("go", "Run diffusion approximation and plot DFS!"), plotOutput("DFS_plot"), checkboxInput("dcfs", "Plot dcfs"), conditionalPanel(condition = "input.dcfs == true", plotOutput("dcfs_plot") ), wellPanel( textInput("prefix", label = "File prefix for saving", value = "DFS_simulation"), actionButton("save_files", "Save plots and data") ) ) ) )
R
3D
ding-lab/mushroom
setup.py
.py
1,796
65
from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md')) as f: long_description = f.read() setup( # $ pip install mushroom name='mushroom', version='0.0.4', description='A Python library for clustering and analysis of multi-modal 3D serial section experiments.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/dinglab/mushroom', author='Erik Storrs', author_email='estorrs@wustl.edu', classifiers=[ 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', ], keywords='multiplex imaging codex neighborhood analysis image segmentation visualization mibi codex phenocycler mihc hyperion', packages=find_packages(), python_requires='>=3.8', install_requires=[ 'scanpy', 'squidpy', 'seaborn', 'tifffile', 'ome-types', 'imagecodecs>=2022.7.27', 'scikit-image', 'scikit-learn', 'torch==2.0.1', 'torchio', 'torchvision==0.15.2', 'tensorboard', 'tensorboardX', 'lightning', 'vit-pytorch', 'einops', 'wandb', 'timm', 'leidenalg', 'igraph', 'spatialdata==0.1.2', 'pydantic-extra-types', ], extras_require={ 'viz': [ 'napari[all]', ], }, include_package_data=True, entry_points={ 'console_scripts': [ # 'mushroom=mushroom.mushroom:main', ], }, )
Python
3D
ding-lab/mushroom
mushroom/__init__.py
.py
0
0
null
Python
3D
ding-lab/mushroom
mushroom/utils.py
.py
8,435
254
import collections.abc import os import re import numpy as np import matplotlib.pyplot as plt import tifffile import torch import torch.nn.functional as F import torchvision.transforms.functional as TF from torchio.transforms import Resize from einops import rearrange from sklearn.cluster import AgglomerativeClustering CHARS = 'abcdefghijklmnopqrstuvwxyz' DEFAULT_KERNEL_3 = torch.full((3,3,3), .25) DEFAULT_KERNEL_3[2, 2, 2] = 1. DEFAULT_KERNEL_5 = torch.full((5,5,5), .1) DEFAULT_KERNEL_5[1:-1, 1:-1, 1:-1] = .25 DEFAULT_KERNEL_5[2, 2, 2] = 1. DEFAULT_KERNEL_7 = torch.full((7,7,7), .05) DEFAULT_KERNEL_7[1:-1, 1:-1, 1:-1] = .1 DEFAULT_KERNEL_7[2:-2, 2:-2, 2:-2] = .25 DEFAULT_KERNEL_7[3, 3, 3] = 1. DEFAULT_KERNELS = { 3: DEFAULT_KERNEL_3, 5: DEFAULT_KERNEL_5, 7: DEFAULT_KERNEL_7 } DTYPES = ('multiplex', 'xenium', 'visium', 'he', 'cosmx', 'points',) def listfiles(folder, regex=None): """Return all files with the given regex in the given folder structure""" for root, folders, files in os.walk(folder): for filename in folders + files: if regex is None: yield os.path.join(root, filename) elif re.findall(regex, os.path.join(root, filename)): yield os.path.join(root, filename) # https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth def recursive_update(d, u): for k, v in u.items(): if isinstance(v, collections.abc.Mapping): d[k] = recursive_update(d.get(k, {}), v) else: d[k] = v return d def parse_dtype(dtype_identifier): if '_' not in dtype_identifier: parsed = dtype_identifier else: parsed = dtype_identifier.split('_')[-1] if parsed not in DTYPES: raise RuntimeError(f'{dtype_identifier} was supplied and its parsed form {parsed} is not a valid dtype string. 
valid data identifiers are either {DTYPES} or "[string]_[dtype]" where [string] can be any filepath-safe string and [dtype] must in {DTYPES}') return parsed def smooth_probabilities(probs, kernel=None, kernel_size=5): """ probs - (n h w labels) kernel - (k, k, k) where k is kernel size """ if kernel is None and kernel_size is None: return probs if kernel is None: kernel = DEFAULT_KERNELS[5] is_numpy = isinstance(probs, np.ndarray) if is_numpy: probs = torch.tensor(probs) stamp = rearrange(kernel, '... -> 1 1 1 1 ...') convs = [] for prob in probs: # pad so we end up with the right shape kernel_size = kernel.shape[0] pad = tuple([kernel_size // 2 for i in range(6)]) prob = rearrange( F.pad(rearrange(prob, 'n h w c -> c n h w'), pad=pad, mode='replicate'), 'c n h w -> n h w c' ) prob = prob.unfold(0, kernel_size, 1) prob = prob.unfold(1, kernel_size, 1) prob = prob.unfold(2, kernel_size, 1) out = (prob * stamp).sum(dim=(-3, -2, -1)) out /= out.max() if is_numpy: out = out.numpy() convs.append(out) return convs def get_interpolated_volume(stacked, section_positions, method='label_gaussian'): """ section_positions - slide indices stacked - (n h w) or (c n h w) """ section_positions = np.asarray(section_positions) section_range = (section_positions.min(), section_positions.max()) squeeze = False if len(stacked.shape) == 3: stacked = rearrange(stacked, 'n h w -> 1 n h w') squeeze = True interp_volume = np.zeros((stacked.shape[0], section_range[-1] + 1, stacked.shape[-2], stacked.shape[-1]), dtype=stacked.dtype) for i in range(stacked.shape[1] - 1): l1, l2 = section_positions[i], section_positions[i+1] stack = stacked[:, i:i+2] transform = Resize((l2 - l1, stack.shape[-2], stack.shape[-1]), image_interpolation=method) resized = transform(stack) interp_volume[:, l1:l2] = resized # add last section interp_volume[:, -1] = stacked[:, -1] if squeeze: interp_volume = rearrange(interp_volume, '1 n h w -> n h w') return interp_volume def smoosh(*args): new = 0 for i, val in 
enumerate(args): new += val * 10**i return new def relabel(labels): new = torch.zeros_like(labels, dtype=labels.dtype) ids = labels.unique() for i in range(len(ids)): new[labels==ids[i]] = i return new # def label_agg_clusters(clusters): # smooshed = np.vectorize(smoosh)(*clusters) # relabeled = relabel(torch.tensor(smooshed)).numpy() # mapping = {relabeled[s, r, c]:tuple([x[s, r, c].item() for x in clusters]) for s in range(relabeled.shape[0]) for r in range(relabeled.shape[1]) for c in range(relabeled.shape[2])} # return relabeled, mapping def label_agg_clusters(agg_clusters): aggs = np.stack(agg_clusters) aggs = rearrange(aggs, 'z n h w -> (n h w) z') aggs = np.unique(aggs, axis=0) agg_to_label = {tuple(agg):i for i, agg in enumerate(aggs)} label_to_agg = {v:k for k, v in agg_to_label.items()} def assign_labels(*args): return agg_to_label[tuple(args)] relabeled = np.vectorize(assign_labels)(*agg_clusters) return relabeled, label_to_agg def aggregate_clusters(df, cluster_ids, n_clusters=10, distance_threshold=None): clustering = AgglomerativeClustering( n_clusters=n_clusters, distance_threshold=distance_threshold ).fit(df.values) cluster_to_label = {c:l for c, l in zip(df.index.to_list(), clustering.labels_)} agg_ids = np.vectorize(cluster_to_label.get)(cluster_ids) return cluster_to_label, agg_ids def display_thresholds(cuts, cluster_ids, intensity_df, channel): nrows, ncols = len(cuts), cluster_ids.shape[0] fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols, nrows)) for cut_idx in range(nrows): cut = cuts[cut_idx] clusters = intensity_df[intensity_df[channel] >= cut].index.to_list() for section_idx in range(ncols): ax = axs[cut_idx, section_idx] masks = np.stack(cluster_ids[section_idx]==cluster for cluster in clusters) mask = masks.sum(0) > 0 ax.imshow(mask) ax.set_xticks([]) ax.set_yticks([]) axs[cut_idx, 0].set_ylabel("%.2f" % cut, rotation=90) return axs def rescale(x, scale=.1, size=None, dim_order='h w c', target_dtype=torch.uint8, 
antialias=True, interpolation=TF.InterpolationMode.BILINEAR): is_tensor = isinstance(x, torch.Tensor) if not is_tensor: x = torch.tensor(x) if dim_order == 'h w c': x = rearrange(x, 'h w c -> c h w') elif dim_order == 'h w': x = rearrange(x, 'h w -> 1 h w') if size is None: size = (int(x.shape[-2] * scale), int(x.shape[-1] * scale)) dtype_map = { # quick and dirty dtype mapping for common np dtypes np.dtype(np.uint8): torch.uint8, np.dtype(np.float32): torch.float32, np.dtype(np.float64): torch.float64, np.dtype(np.int64): torch.int64, np.dtype(bool): torch.bool, np.uint8: torch.uint8, np.float32: torch.float32, np.float64: torch.float64, np.int64: torch.int64, bool: torch.bool, } target_dtype = dtype_map.get(np.dtype(target_dtype) if isinstance(target_dtype, np.dtype) else target_dtype, target_dtype) x = TF.resize(x, size, antialias=antialias, interpolation=interpolation) # really need to rewrite this in a sane way if x.dtype not in [torch.long, torch.int64, torch.int32, torch.bool, torch.float32, torch.float64] and x.dtype != target_dtype: # if its a labeled image this wont work x = TF.convert_image_dtype(x, target_dtype) if dim_order == 'h w c': x = rearrange(x, 'c h w -> h w c') elif dim_order == 'h w': x = rearrange(x, '1 h w -> h w') if not is_tensor: x = x.numpy() return x def read_mask(mask): if isinstance(mask, str): ext = mask.split('/')[-1].split('.')[-1] if ext == 'tif': mask = tifffile.imread(mask) elif ext in ['npy', 'npz']: mask = np.load(mask) else: raise RuntimeError(f'Extension {ext} is not supported for masks') if mask is not None: return mask > 0
Python
3D
ding-lab/mushroom
mushroom/mushroom.py
.py
38,941
868
import logging import pickle import os import re from copy import deepcopy from pathlib import Path import numpy as np import pandas as pd import matplotlib.pyplot as plt import torch import torch.nn.functional as F import torchvision.transforms.functional as TF import yaml from einops import rearrange, repeat from torch.utils.data import DataLoader from torchio.transforms import Resize from lightning.pytorch import loggers as pl_loggers from lightning.pytorch import Trainer from lightning.pytorch.callbacks import ModelCheckpoint, Callback import mushroom.visualization.utils as vis_utils from mushroom.model.sae import SAEargs from mushroom.model.model import LitSpore, WandbImageCallback, VariableTrainingCallback from mushroom.data.datasets import get_learner_data, construct_training_batch, construct_inference_batch from mushroom.model.integration import integrate_volumes import mushroom.utils as utils DEFAULT_CONFIG = { 'sections': None, # section config 'dtype_to_chkpt': None, # dictionary for data type specific mushroom models 'dtype_specific_params': { 'visium': { 'trainer_kwargs': { 'tiling_method': 'radius', } }, }, 'sae_kwargs': { 'size': 8, 'patch_size': 1, 'encoder_dim': 128, 'codebook_dim': 64, 'num_clusters': (8, 4, 2,), 'dtype_to_decoder_dims': {'multiplex': (256, 128, 64,), 'he': (256, 128, 10,), 'visium': (256, 512, 2048,), 'xenium': (256, 256, 256,), 'cosmx': (256, 512, 1024,), 'points': (256, 512, 1024)}, 'recon_scaler': 1., 'neigh_scaler': .01, }, 'trainer_kwargs': { 'input_resolution': 1., 'target_resolution': .02, # grid width of 50 microns 'pct_expression': .05, 'log_base': np.e, 'tiling_method': 'grid', 'tiling_radius': 1., 'batch_size': 128, 'num_workers': 0, 'devices': 1, 'accelerator': 'cpu', 'max_epochs': 1, 'steps_per_epoch': 1000, 'lr': 1e-4, 'out_dir': './outputs', 'save_every': 1, 'log_every_n_steps': 10, 'logger_type': 'tensorboard', 'logger_project': 'portobello', 'channel_mapping': {}, 'data_mask': None, }, } class Mushroom(object): 
def __init__( self, sections, dtype_to_chkpt=None, dtype_specific_params=None, sae_kwargs=None, trainer_kwargs=None, ): self.sections = sections self.dtype_to_chkpt = dtype_to_chkpt self.dtype_specific_params = dtype_specific_params self.sae_kwargs = sae_kwargs self.trainer_kwargs = trainer_kwargs self.input_ppm = self.trainer_kwargs['input_resolution'] self.target_ppm = self.trainer_kwargs['target_resolution'] self.section_ids = [(entry['sid'], d['dtype']) for entry in sections for d in entry['data']] self.dtypes = sorted({x for _, x in self.section_ids}) self.dtype_to_spore = {} for dtype in self.dtypes: logging.info(f'loading spore for {dtype}') dtype_sections = [entry for entry in deepcopy(sections) if dtype in [item['dtype'] for item in entry['data']]] for i, entry in enumerate(dtype_sections): entry['data'] = [item for item in entry['data'] if item['dtype'] == dtype] dtype_sections[i] = entry out_dir = self.trainer_kwargs['out_dir'] trainer_kwargs['save_dir'] = os.path.join(out_dir, f'{dtype}_chkpts') trainer_kwargs['log_dir'] = os.path.join(out_dir, f'{dtype}_logs') chkpt_filepath = self.dtype_to_chkpt[dtype] if self.dtype_to_chkpt is not None else None spore_sae_kwargs = deepcopy(sae_kwargs) spore_trainer_kwargs = deepcopy(trainer_kwargs) if self.dtype_specific_params is not None: to_update = self.dtype_specific_params.get(dtype, {}) if 'sae_kwargs' in to_update: spore_sae_kwargs = utils.recursive_update(spore_sae_kwargs, to_update['sae_kwargs']) if 'trainer_kwargs' in to_update: spore_trainer_kwargs = utils.recursive_update(spore_trainer_kwargs, to_update['trainer_kwargs']) spore = Spore(dtype_sections, chkpt_filepath=chkpt_filepath, sae_kwargs=spore_sae_kwargs, trainer_kwargs=spore_trainer_kwargs) self.dtype_to_spore[dtype] = spore self.num_levels = len(self.sae_kwargs['num_clusters']) self.integrated_clusters = None self.dtype_to_volume, self.dtype_to_volume_probs = None, None self.section_positions = None @staticmethod def from_config(input, 
accelerator=None): if isinstance(input, str): mushroom_config = os.path.join(input, 'config.yaml') if os.path.exists(os.path.join(input, 'outputs.pkl')): outputs = pickle.load(open(os.path.join(input, 'outputs.pkl'), 'rb')) elif os.path.exists(os.path.join(input, 'outputs.npy')): outputs = np.load(os.path.join(input, 'outputs.npy'), allow_pickle=True).flat[0] else: outputs = None mushroom_config = yaml.safe_load(open(mushroom_config)) else: mushroom_config = input outputs = None # confirm sections are in order of position mushroom_config['sections'] = sorted(mushroom_config['sections'], key=lambda x: x['position']) if accelerator is not None: mushroom_config['trainer_kwargs']['accelerator'] = accelerator mushroom = Mushroom( mushroom_config['sections'], dtype_to_chkpt=mushroom_config['dtype_to_chkpt'], dtype_specific_params=mushroom_config['dtype_specific_params'], sae_kwargs=mushroom_config['sae_kwargs'], trainer_kwargs=mushroom_config['trainer_kwargs'], ) if mushroom_config['dtype_to_chkpt'] is not None: logging.info(f'chkpt files detected, embedding to spores') mushroom.embed_sections() if outputs is not None: mushroom.section_positions = outputs['section_positions'] mushroom.section_ids = outputs['section_ids'] mushroom.dtype_to_volume = outputs['dtype_to_volume'] mushroom.dtype_to_volume_probs = outputs['dtype_to_volume_probs'] mushroom.integrated_clusters = outputs['dtype_to_clusters']['integrated'] return mushroom def train(self, dtypes=None): dtypes = dtypes if dtypes is not None else self.dtypes if self.dtype_to_chkpt is None: self.dtype_to_chkpt = {} for dtype in dtypes: logging.info(f'starting training for {dtype}') spore = self.dtype_to_spore[dtype] spore.train() # save chkpts chkpt_dir = os.path.join(self.trainer_kwargs['out_dir'], f'{dtype}_chkpts') fps = [fp for fp in os.listdir(chkpt_dir) if 'last' in fp] if len(fps) == 1: chkpt_fp = os.path.join(chkpt_dir, 'last.ckpt') else: val = np.max([int(re.sub(r'^last-v([0-9]+).ckpt$', r'\1', fp)) for fp in 
fps if 'last-v' in fp]) chkpt_fp = os.path.join(chkpt_dir, f'last-v{val}.ckpt') logging.info(f'finished training {dtype}, saved chkpt to {chkpt_fp}') self.dtype_to_chkpt[dtype] = chkpt_fp def embed_sections(self, dtypes=None): dtypes = dtypes if dtypes is not None else self.dtypes for dtype in dtypes: logging.info(f'embedding {dtype} spore') spore = self.dtype_to_spore[dtype] spore.embed_sections() # make sure all spores are the same neighborhood resolution sizes = [spore.clusters[0].shape[-2:] for spore in self.dtype_to_spore.values()] idx = np.argmax([x[0] for x in sizes]) size = sizes[idx] for dtype in dtypes: spore = self.dtype_to_spore[dtype] spore.resize_clusters(self, size=size) def save(self, output_dir=None): if output_dir is None: output_dir = self.trainer_kwargs['out_dir'] Path(output_dir).mkdir(parents=True, exist_ok=True) logging.info(f'saving config and outputs to {output_dir}') config = { 'sections': self.sections, 'dtype_to_chkpt': self.dtype_to_chkpt, 'dtype_specific_params': self.dtype_specific_params, 'sae_kwargs': self.sae_kwargs, 'trainer_kwargs': self.trainer_kwargs } # clusters and cluster probs dtype_to_clusters = { 'integrated': self.integrated_clusters, } dtype_to_cluster_probs, dtype_to_cluster_probs_all, dtype_to_cluster_to_agg = {}, {}, {} for dtype, spore in self.dtype_to_spore.items(): dtype_to_clusters[dtype] = spore.clusters dtype_to_cluster_probs[dtype] = spore.cluster_probs dtype_to_cluster_probs_all[dtype] = spore.cluster_probs_all dtype_to_cluster_to_agg[dtype] = spore.cluster_to_agg # cluster intensities dtype_to_cluster_intensities = { 'dtype_specific': [ self.calculate_cluster_intensities(level=level) for level in range(len(next(iter(self.dtype_to_spore.values())).clusters)) ] } if self.dtype_to_volume is not None: dtype_to_cluster_intensities['dtype_projections'] = { dtype: [ self.calculate_cluster_intensities(level=level, projection_dtype=dtype) if self.integrated_clusters[level] is not None else None for level in 
range(self.num_levels) ] for dtype, volume in self.dtype_to_volume.items() } try: dtype_to_cluster_intensities['integrated'] = [ self.calculate_cluster_intensities(level=level, projection_dtype='integrated') if self.integrated_clusters[level] is not None else None for level in range(len(self.integrated_clusters)) ] except KeyError: logging.info('no integrated clusters found') outputs = { 'section_positions': self.section_positions, 'section_ids': self.section_ids, 'dtype_to_volume': self.dtype_to_volume, 'dtype_to_volume_probs': self.dtype_to_volume_probs, 'dtype_to_clusters': dtype_to_clusters, 'dtype_to_cluster_probs': dtype_to_cluster_probs, 'dtype_to_cluster_probs_all': dtype_to_cluster_probs_all, 'dtype_to_cluster_intensities': dtype_to_cluster_intensities, 'dtype_to_cluster_to_agg': dtype_to_cluster_to_agg } # yaml doesn't like to save path objects config['trainer_kwargs']['out_dir'] = str(config['trainer_kwargs']['out_dir']) yaml.safe_dump( config, open(os.path.join(output_dir, f'config.yaml'), 'w') ) pickle.dump(outputs, open(os.path.join(output_dir, f'outputs.pkl'), 'wb'), protocol=4) def calculate_cluster_intensities(self, use_predicted=True, level=-1, projection_dtype=None, dtype_to_volume=None): if projection_dtype is not None: assert self.dtype_to_volume is not None, 'Must generate volume first' if dtype_to_volume is None: dtype_to_volume = self.dtype_to_volume dtype_to_df = {} for dtype, spore in self.dtype_to_spore.items(): if projection_dtype is None: dtype_to_df[dtype] = spore.get_cluster_intensities(use_predicted=use_predicted, level=level)[dtype] else: clusters = np.stack([dtype_to_volume[projection_dtype][i] for i in self.section_positions]) input_clusters = np.stack([c for c, (_, dt) in zip(clusters, self.section_ids) if dt==dtype]) input_clusters = [input_clusters for i in range(self.num_levels)] dtype_to_df[dtype] = spore.get_cluster_intensities(use_predicted=use_predicted, level=level, input_clusters=input_clusters)[dtype] return dtype_to_df 
def generate_interpolated_volumes(self, z_scaler=.1, level=-1, use_probs=True, integrate=True, dist_thresh=.4, n_iterations=10, resolution=2., dtype_to_weight=None, kernel=None, kernel_size=None, gene_idx=None): dtypes, spores = zip(*self.dtype_to_spore.items()) if self.integrated_clusters is None: self.integrated_clusters = [None for i in range(len(next(iter(self.dtype_to_spore.values())).clusters))] section_positions = [] sids = [] for spore in spores: section_positions += [entry['position'] for entry in spore.sections] sids += spore.section_ids section_positions, sids = zip(*sorted([(p, tup) for p, tup in zip(section_positions, sids)], key=lambda x: x[0])) section_positions = (np.asarray(section_positions) * z_scaler).astype(int) for i, (val, (ident, dtype)) in enumerate(zip(section_positions, sids)): if i > 0: old = section_positions[i-1] old_ident = sids[i-1][0] if old == val and old_ident != ident: section_positions[i:] = section_positions[i:] + 1 start, stop = section_positions[0], section_positions[-1] dtype_to_volume = {} for dtype, spore in zip(dtypes, spores): logging.info(f'generating volume for {dtype} spore') positions = [p for p, (_, dt) in zip(section_positions, sids) if dt==dtype] if gene_idx is not None: pass elif use_probs: clusters = spore.cluster_probs[level].copy() else: clusters = spore.clusters[level].copy() if positions[0] != start: positions.insert(0, start) clusters = np.concatenate((clusters[:1], clusters)) if positions[-1] != stop: positions.append(stop) clusters = np.concatenate((clusters, clusters[-1:])) if use_probs: clusters = rearrange(clusters, 'n h w c -> c n h w') volume = utils.get_interpolated_volume(clusters, positions, method='linear') volume = rearrange(volume, 'c n h w -> n h w c') else: volume = utils.get_interpolated_volume(clusters, positions) dtype_to_volume[dtype] = volume if integrate: logging.info(f'generating integrated volume') dtype_to_cluster_intensities = self.calculate_cluster_intensities(level=level) 
integrated = integrate_volumes(dtype_to_volume, dtype_to_cluster_intensities, are_probs=use_probs, dist_thresh=dist_thresh, n_iterations=n_iterations, resolution=resolution, dtype_to_weight=dtype_to_weight, kernel=kernel, kernel_size=kernel_size) logging.info(f'finished integration, found {integrated.max()} clusters') dtype_to_volume['integrated'] = integrated self.integrated_clusters[level] = np.stack([integrated[i] for i in section_positions]) if use_probs: self.dtype_to_volume_probs = dtype_to_volume self.dtype_to_volume = {dtype:probs.argmax(-1) if dtype!='integrated' else probs for dtype, probs in self.dtype_to_volume_probs.items()} else: self.dtype_to_volume = dtype_to_volume self.section_positions = section_positions return dtype_to_volume def display_predicted_pixels(self, dtype, channel, level=-1, figsize=None): spore = self.dtype_to_spore[dtype] return spore.display_predicted_pixels(channel, dtype, level=level, figsize=figsize) def display_cluster_probs(self, dtype, level=-1, return_axs=False): if dtype == 'integrated': raise RuntimeError(f'Probabilities are not caclulated for integrated clusters') else: spore = self.dtype_to_spore[dtype] return spore.display_cluster_probs(level=level, return_axs=return_axs) def display_clusters(self, dtype, level=-1, section_idxs=None, section_ids=None, cmap=None, figsize=None, horizontal=True, preserve_indices=True, return_axs=False, use_hierarchy=True, discard_max=False): if dtype == 'integrated': clusters = self.integrated_clusters[level] label_to_hierarchy = None else: clusters = self.dtype_to_spore[dtype].clusters[level] label_to_hierarchy = self.dtype_to_spore[dtype].cluster_to_agg[level] if not use_hierarchy: label_to_hierarchy = None if section_ids is None and section_idxs is None: return vis_utils.display_clusters( clusters, cmap=cmap, figsize=figsize, horizontal=horizontal, preserve_indices=preserve_indices, return_axs=return_axs, label_to_hierarchy=label_to_hierarchy, discard_max=discard_max) else: if 
section_idxs is None: sids = self.section_ids if dtype == 'intergrated' else [sid for sid in self.section_ids if sid[1] == dtype] section_idxs = [i for i, sid in enumerate(sids) if sid in section_ids] return vis_utils.display_clusters( clusters[section_idxs], cmap=cmap, figsize=figsize, horizontal=horizontal, preserve_indices=preserve_indices, return_axs=return_axs, label_to_hierarchy=label_to_hierarchy, discard_max=discard_max) def display_volumes(self, positions=None, dtype_to_volume=None, figsize=None, return_axs=False, level=None): if dtype_to_volume is None: assert self.dtype_to_volume is not None, f'need to run generate_interpolated_volumes first' dtype_to_volume = self.dtype_to_volume dtypes, volumes = zip(*dtype_to_volume.items()) if positions is not None: volumes = [v[positions] for v in volumes] ncols = len(volumes) nrows = volumes[0].shape[0] if figsize is None: figsize = (ncols, nrows) fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) if nrows == 1: axs = rearrange(axs, 'n -> 1 n') if ncols == 1 and nrows != 1: axs = rearrange(axs, 'n -> n 1') for i in range(volumes[0].shape[0]): for j, volume in enumerate(volumes): ax = axs[i, j] dt = dtypes[j] if dt != 'integrated' and level is not None: label_to_hierarchy = self.dtype_to_spore[dt].cluster_to_agg[level] else: label_to_hierarchy = None rgb = vis_utils.display_labeled_as_rgb(volume[i], preserve_indices=True, label_to_hierarchy=label_to_hierarchy) ax.imshow(rgb) ax.axis('off') if i==0: ax.set_title(dt) if return_axs: return axs def assign_pts(self, pts, section_id, dtype, level=-1, scale=True, use_volume=False, volume=None): """ pts are (x, y) """ dtype = section_id[1] if dtype is None else dtype if scale: # target_ppm = self.dtype_to_spore[section_id[1]].target_ppm # print(target_ppm) # target_ppm /= 2 # scaler = self.input_ppm / self.target_ppm scaler = 1 / (self.target_ppm / self.input_ppm) pts = pts * scaler pts = pts.astype(int) if dtype == 'integrated': section_idx = 
self.section_ids.index(section_id) nbhds = self.integrated_clusters[level][section_idx] else: if use_volume: section_idx = self.section_ids.index(section_id) position = self.section_positions[section_idx] if volume is None: nbhds = self.dtype_to_volume[dtype][position] else: nbhds = volume[position] else: spore = self.dtype_to_spore[dtype] section_idx = spore.section_ids.index(section_id) nbhds = spore.clusters[level][section_idx] max_h, max_w = nbhds.shape[0] - 1, nbhds.shape[1] - 1 pts[pts[:, 0] > max_w, 0] = max_w pts[pts[:, 1] > max_h, 1] = max_h labels = nbhds[pts[:, 1], pts[:, 0]] return labels class Spore(object): def __init__( self, sections, chkpt_filepath=None, sae_kwargs=None, trainer_kwargs=None, ): # if singleton section, add a duplicate # will be adjusted back to single section after embedding if len(sections) == 1: logging.info('singleton section detected, creating temporary duplicate') entry = deepcopy(sections[0]) entry['position'] += 1 entry['sid'] = entry['sid'] + '_dup' sections.append(entry) self.is_singleton = True else: self.is_singleton = False self.sections = sections self.chkpt_filepath = chkpt_filepath self.sae_kwargs = sae_kwargs self.trainer_kwargs = trainer_kwargs # extract mask if it's there if 'data_mask' in self.trainer_kwargs: self.data_mask = utils.read_mask(self.trainer_kwargs['data_mask']) self.trainer_kwargs.pop('data_mask') logging.info('data mask detected') else: self.data_mask = None self.channel_mapping = self.trainer_kwargs['channel_mapping'] self.input_ppm = self.trainer_kwargs['input_resolution'] self.target_ppm = self.trainer_kwargs['target_resolution'] self.pct_expression = self.trainer_kwargs['pct_expression'] self.tiling_method = self.trainer_kwargs['tiling_method'] self.tiling_radius = self.trainer_kwargs['tiling_radius'] self.log_base = self.trainer_kwargs['log_base'] self.sae_args = SAEargs(**self.sae_kwargs) if self.sae_kwargs is not None else {} self.size = (self.sae_args.size, self.sae_args.size) 
self.learner_data = get_learner_data(self.sections, self.input_ppm, self.target_ppm, self.sae_args.size, channel_mapping=self.channel_mapping, pct_expression=self.pct_expression, data_mask=self.data_mask, tiling_method=self.tiling_method, tiling_radius=self.tiling_radius, log_base=self.log_base) self.section_ids = self.learner_data.train_ds.section_ids self.dtypes = self.learner_data.dtypes self.dtype_to_channels = self.learner_data.dtype_to_channels self.batch_size = self.trainer_kwargs['batch_size'] self.num_workers = self.trainer_kwargs['num_workers'] # by default dataset is infinite, change to desired num steps self.learner_data.train_ds.n = self.trainer_kwargs['steps_per_epoch'] * self.batch_size logging.info('creating data loaders') self.train_dl = DataLoader( self.learner_data.train_ds, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=construct_training_batch ) self.inference_dl = DataLoader( self.learner_data.inference_ds, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=construct_inference_batch ) self.model = LitSpore( self.sae_args, self.learner_data, lr=self.trainer_kwargs['lr'], total_steps=self.trainer_kwargs['max_epochs'] * self.trainer_kwargs['steps_per_epoch'], ) logging.info('model initialized') Path(self.trainer_kwargs['log_dir']).mkdir(parents=True, exist_ok=True) Path(self.trainer_kwargs['save_dir']).mkdir(parents=True, exist_ok=True) callbacks = [] if self.trainer_kwargs['logger_type'] == 'wandb': logger = pl_loggers.WandbLogger( project=self.trainer_kwargs['logger_project'], save_dir=self.trainer_kwargs['log_dir'], ) logger.experiment.config.update({ 'trainer_kwargs': self.trainer_kwargs, 'sae_kwargs': self.sae_kwargs, 'sections': self.sections }) logging_callback = WandbImageCallback( logger, self.learner_data, self.inference_dl) callbacks.append(logging_callback) else: logger = pl_loggers.TensorBoardLogger( save_dir=self.trainer_kwargs['log_dir'], ) chkpt_callback = ModelCheckpoint( 
dirpath=self.trainer_kwargs['save_dir'], save_last=True, # save_top_k=-1, every_n_epochs=self.trainer_kwargs['save_every'], ) callbacks.append(chkpt_callback) vt_callback = VariableTrainingCallback() callbacks.append(vt_callback) self.trainer = self.initialize_trainer(logger, callbacks) if self.chkpt_filepath is not None: logging.info(f'loading checkpoint: {self.chkpt_filepath}') map_str = 'cuda' if self.trainer_kwargs['accelerator'] == 'gpu' else 'cpu' state_dict = torch.load(self.chkpt_filepath, map_location=torch.device(map_str))['state_dict'] self.model.load_state_dict(state_dict) self.predicted_pixels, self.scaled_predicted_pixels = None, None self.true_pixels, self.scaled_true_pixels = None, None self.clusters, self.agg_clusters, self.cluster_probs_agg = None, None, None self.cluster_probs, self.cluster_probs_all = None, None self.cluster_to_agg = None @staticmethod def from_config(config, chkpt_filepath=None, accelerator=None): if isinstance(config, str): config = yaml.safe_load(open(config)) if accelerator is not None: config['trainer_kwargs']['accelerator'] = 'cpu' spore = Spore( config['sections'], sae_kwargs=config['sae_kwargs'], trainer_kwargs=config['trainer_kwargs'], chkpt_filepath=chkpt_filepath ) return spore def _calculate_probs(self): n_levels = len(self.clusters) # probs for all clusters level_to_probs_all = [] for level in range(n_levels): chars = utils.CHARS[:level + 1] ein_exp = ','.join([f'nhw{x}' for x in chars]) ein_exp += f'->nhw{chars}' probs = np.einsum(ein_exp, *self.cluster_probs_agg[:level + 1]) if level: new_probs = np.zeros_like(probs) for label, cluster in self.cluster_to_agg[level].items(): mask = self.clusters[level]==label values = probs[mask] # (n, z) empty = np.zeros_like(values) selections = tuple([slice(None)] + list(cluster)[:-1]) empty[selections] = values[selections] new_probs[mask] = empty else: new_probs = probs level_to_probs_all.append(new_probs) # probs for labeled clusters only level_to_probs = [] for level in 
range(n_levels): probs = level_to_probs_all[level] if level: labeled_probs = np.zeros((probs.shape[0], probs.shape[1], probs.shape[2], len(self.cluster_to_agg[level]))) for label, cluster in self.cluster_to_agg[level].items(): mask = self.clusters[level]==label values = probs[mask] # (n, a, b, c) window = labeled_probs[mask] labels, cs = zip(*[(l, c) for l, c in self.cluster_to_agg[level].items() if list(c)[:-1] == list(cluster)[:-1]]) cs = np.asarray(cs) tups = [tuple(cs[:, i]) for i in range(cs.shape[1])] selections = tuple([slice(None)] + tups) window[:, labels] = values[selections] labeled_probs[mask] = window else: labels = np.unique(probs.argmax(axis=-1)) labeled_probs = probs[..., labels] level_to_probs.append(labeled_probs) return level_to_probs, level_to_probs_all def initialize_trainer( self, logger, callbacks ): return Trainer( devices=self.trainer_kwargs['devices'], accelerator=self.trainer_kwargs['accelerator'], enable_checkpointing=True, log_every_n_steps=self.trainer_kwargs['log_every_n_steps'], max_epochs=self.trainer_kwargs['max_epochs'], callbacks=callbacks, logger=logger ) def train(self): self.trainer.fit(self.model, self.train_dl) def embed_sections(self): n = len(self.section_ids) if self.is_singleton: self.section_ids = self.section_ids[:-1] self.sections = self.sections[:-1] n -= 1 outputs = self.trainer.predict(self.model, self.inference_dl) formatted = self.model.format_prediction_outputs(outputs) self.predicted_pixels = [[z.cpu().clone().detach().numpy() for z in x[:n]] for x in formatted['predicted_pixels']] # [level][n](h w c) self.true_pixels = [x.cpu().clone().detach().numpy() for x in formatted['true_pixels'][:n]] # [n](h w c) self.clusters = [x[:n] for x in formatted['clusters']] # [level](n h w) self.agg_clusters = [x[:n].cpu().clone().detach().numpy().astype(int) for x in formatted['agg_clusters']] # [level](n h w) self.cluster_probs_agg = [x[:n].cpu().clone().detach().numpy() for x in formatted['cluster_probs']] # [level](n h w 
c) where c is n clusters for that level self.cluster_to_agg = [x for x in formatted['label_to_original']] # [level]d where d is dict mapping cluster label to original cluster # self.cluster_probs_all - [level](n, h, w, *) where * is number of dims equal to num clusters for each level # self.cluster_probs - [level](n, h, w, c) where c is total number of clusters in level self.cluster_probs, self.cluster_probs_all = self._calculate_probs() self.cluster_probs_all = [x[:n] for x in self.cluster_probs_all] self.cluster_probs = [x[:n] for x in self.cluster_probs] def resize_clusters(self, scale=1., size=None): if size is None: size = (int(self.clusters[0].shape[-2] * scale), int(self.clusters[0].shape[-1] * scale)) self.true_pixels = [utils.rescale(x, size=size, dim_order='h w c', target_dtype=x.dtype) for x in self.true_pixels] for level in range(len(self.clusters)): self.predicted_pixels[level] = [ utils.rescale(x, size=size, dim_order='h w c', target_dtype=x.dtype) for x in self.predicted_pixels[level]] self.clusters[level] = utils.rescale(self.clusters[level], size=size, dim_order='c h w', target_dtype=self.clusters[level].dtype, antialias=False, interpolation=TF.InterpolationMode.NEAREST) self.agg_clusters[level] = utils.rescale(self.agg_clusters[level], size=size, dim_order='c h w', target_dtype=self.agg_clusters[level].dtype, antialias=False, interpolation=TF.InterpolationMode.NEAREST) self.cluster_probs_agg[level] = rearrange(utils.rescale(rearrange(self.cluster_probs_agg[level], 'n h w c -> n c h w'), size=size, dim_order='n c h w', target_dtype=self.cluster_probs_agg[level].dtype), 'n c h w -> n h w c') self.cluster_probs[level] = rearrange(utils.rescale(rearrange(self.cluster_probs[level], 'n h w c -> n c h w'), size=size, dim_order='n c h w', target_dtype=self.cluster_probs[level].dtype), 'n c h w -> n h w c') kwargs = {c:i for c, i in zip(utils.CHARS, self.cluster_probs_all[level].shape[3:])} chars = utils.CHARS[:len(kwargs)] char_str = ' '.join(chars) 
self.cluster_probs_all[level] = rearrange(utils.rescale(rearrange(self.cluster_probs_all[level], 'n h w ... -> n (...) h w'), size=size, dim_order='n c* h w', target_dtype=self.cluster_probs_all[level].dtype), f'n ({char_str}) h w -> n h w {char_str}', **kwargs) def get_cluster_intensities(self, use_predicted=True, level=-1, input_clusters=None): if input_clusters is None: input_clusters = self.clusters dtype_to_df = {} imgs = self.predicted_pixels[level] if use_predicted else self.true_pixels for dtype in self.dtypes: sections, clusters = [], [] for (sid, dt), img, labeled in zip(self.section_ids, imgs, input_clusters[level]): if dt == dtype: sections.append(img) clusters.append(labeled) sections = np.stack(sections) # (n, h, w, c) clusters = np.stack(clusters) # (n, h, w) data = [] for cluster in np.unique(clusters): mask = clusters==cluster data.append(sections[mask, :].mean(axis=0)) df = pd.DataFrame(data=data, columns=self.learner_data.dtype_to_channels[dtype], index=np.unique(clusters)) dtype_to_df[dtype] = df return dtype_to_df def generate_interpolated_volume(self, z_scaler=.1, level=-1, use_probs=False): section_positions = [entry['position'] for entry in self.sections] section_positions = (np.asarray(section_positions) * z_scaler).astype(int) for i, val in enumerate(section_positions): if i > 0: old = section_positions[i-1] if old == val: section_positions[i:] = section_positions[i:] + 1 if use_probs: probs = rearrange(self.cluster_probs[level], 'n h w c -> c n h w') volume = utils.get_interpolated_volume(probs, section_positions, method='linear') volume = rearrange(volume, 'c n h w -> n h w c') else: volume = utils.get_interpolated_volume(self.clusters[level], section_positions, method='label_gaussian') return volume def display_predicted_pixels(self, channel, dtype, level=-1, figsize=None, return_axs=False): if self.predicted_pixels is None: raise RuntimeError( 'Must train model and embed sections before displaying. 
To embed run .embed_sections()') pred, true, sids = [], [], [] for (sid, dt), pred_imgs, true_imgs in zip(self.section_ids, self.predicted_pixels[level], self.true_pixels): if dt == dtype: pred.append(pred_imgs) true.append(true_imgs) sids.append(sid) pred = np.stack(pred) # (n, h, w, c) true = np.stack(true) # (n, h, w, c) fig, axs = plt.subplots(nrows=2, ncols=pred.shape[0], figsize=figsize) for sid, img, ax in zip(sids, pred, axs[0, :]): ax.imshow(img[..., self.learner_data.dtype_to_channels[dtype].index(channel)]) ax.set_xticks([]) ax.set_yticks([]) ax.set_title(sid) for sid, img, ax in zip(sids, true, axs[1, :]): ax.imshow(img[..., self.learner_data.dtype_to_channels[dtype].index(channel)]) ax.set_xticks([]) ax.set_yticks([]) axs[0, 0].set_ylabel('predicted') axs[1, 0].set_ylabel('true') if return_axs: return axs def display_cluster_probs(self, level=-1, prob_type='clusters', return_axs=False): if prob_type == 'clusters_agg': cluster_probs = self.cluster_probs_agg[level] else: cluster_probs = self.cluster_probs[level] fig, axs = plt.subplots( nrows=cluster_probs.shape[-1], ncols=cluster_probs.shape[0], figsize=(cluster_probs.shape[0], cluster_probs.shape[-1]) ) if cluster_probs.shape[-1] == 1: axs = rearrange(axs, 'n -> 1 n') if cluster_probs.shape[0] == 1 and cluster_probs.shape[-1] != 1: axs = rearrange(axs, 'n -> n 1') for c in range(cluster_probs.shape[0]): for r in range(cluster_probs.shape[-1]): ax = axs[r, c] ax.imshow(cluster_probs[c, ..., r]) ax.set_yticks([]) ax.set_xticks([]) if c == 0: ax.set_ylabel(r, rotation=90) if return_axs: return axs def display_clusters(self, level=-1, section_idxs=None, section_ids=None, cmap=None, figsize=None, horizontal=True, preserve_indices=True, return_axs=False): if section_ids is None and section_idxs is None: return vis_utils.display_clusters( self.clusters[level], cmap=cmap, figsize=figsize, horizontal=horizontal, preserve_indices=preserve_indices, return_axs=return_axs, 
label_to_hierarchy=self.cluster_to_agg[level]) else: if section_idxs is None: section_idxs = [i for i, sid in enumerate(self.section_ids) if sid in section_ids] return vis_utils.display_clusters( self.clusters[level][section_idxs], cmap=cmap, figsize=figsize, horizontal=horizontal, preserve_indices=preserve_indices, return_axs=return_axs, label_to_hierarchy=self.cluster_to_agg[level]) def assign_pts(self, pts, section_id=None, section_idx=None, level=-1, scale=True): """ pts are (x, y) """ assert section_id is not None or section_idx is not None, f'either section id or section index must be given' if scale: # scaler = self.input_ppm / self.target_ppm scaler = 1 / (self.target_ppm / self.input_ppm) pts = pts / scaler pts = pts.astype(int) section_idx = self.section_ids.index(section_id) if section_id is not None else section_idx nbhds = self.clusters[level][section_idx] max_h, max_w = nbhds.shape[0] - 1, nbhds.shape[1] - 1 pts[pts[:, 0] > max_w, 0] = max_w pts[pts[:, 1] > max_h, 1] = max_h labels = nbhds[pts[:, 1], pts[:, 0]] return labels
Python
3D
ding-lab/mushroom
mushroom/model/model.py
.py
9,921
239
import logging import os from typing import Any, Optional import warnings from pytorch_lightning.utilities.types import STEP_OUTPUT import numpy as np import torch from einops import rearrange, repeat from torch.utils.data import DataLoader from vit_pytorch import ViT from lightning.pytorch import LightningModule from lightning.pytorch.callbacks import Callback from mushroom.model.sae import SAE, SAEargs from mushroom.visualization.utils import display_labeled_as_rgb import mushroom.utils as utils logger = logging.getLogger() logger.setLevel(logging.INFO) warnings.simplefilter(action='ignore', category=FutureWarning) class WandbImageCallback(Callback): def __init__(self, wandb_logger, learner_data, inference_dl): self.logger = wandb_logger self.inference_dl = inference_dl self.learner_data = learner_data self.channel = 0 def on_train_epoch_end(self, trainer, pl_module): outputs = [] with torch.no_grad(): for batch in self.inference_dl: tiles, slides, dtypes = batch['tiles'], batch['slides'], batch['dtypes'] tiles = [x.to(pl_module.device) for x in tiles] slides = [x.to(pl_module.device) for x in slides] dtypes = [x.to(pl_module.device) for x in dtypes] outs = pl_module.forward(tiles, slides, dtypes) # do this properly eventually outs['outputs']['dtype_to_true_pixels'] = {k:v.cpu() for k, v in outs['outputs']['dtype_to_true_pixels'].items()} outs['outputs']['dtype_to_pred_pixels'] = {k:v.cpu() for k, v in outs['outputs']['dtype_to_pred_pixels'].items()} outputs.append(outs) formatted = pl_module.format_prediction_outputs(outputs) predicted_pixels = [[z.cpu().clone().detach().numpy() for z in x] for x in formatted['predicted_pixels']] true_pixels = [x.cpu().clone().detach().numpy() for x in formatted['true_pixels']] clusters = [x for x in formatted['clusters']] for level, imgs in enumerate(predicted_pixels): self.logger.log_image( key=f'predicted pixels {level} {self.channel}', images=[img[..., 0] for img in imgs], caption=[str(i) for i in range(len(imgs))] ) 
self.logger.log_image( key=f'true pixels {self.channel}', images=[img[..., 0] for img in true_pixels], caption=[str(i) for i in range(len(true_pixels))] ) for level, cs in enumerate(clusters): # print([np.unique(c) for c in cs]) self.logger.log_image( key=f'clusters {level}', images=[display_labeled_as_rgb(labeled, preserve_indices=True) for labeled in cs], caption=[str(i) for i in range(len(cs))] ) class VariableTrainingCallback(Callback): def __init__(self, end_pretraining_at=5): self.end_pretraining_at = end_pretraining_at def on_train_epoch_end(self, trainer, pl_module): if self.end_pretraining_at == pl_module.current_epoch: pass # logging.info(f'stopping pretraining at {self.end_pretraining_at}') # pl_module.sae.end_pretraining() # pass # pl_module.sae.freeze_ # print(f'stoppint pretraining level {self.pretrain_for}') # pl_module.sae.end_pretraining() class LitSpore(LightningModule): def __init__( self, sae_args, learner_data, lr=1e-4, total_steps=1, ): super().__init__() self.image_size = sae_args.size self.patch_size = sae_args.patch_size self.lr = lr self.learner_data = learner_data self.sae_args = sae_args logging.info('creating ViT') encoder = ViT( image_size=self.image_size, patch_size=self.patch_size, num_classes=sae_args.num_classes, dim=sae_args.encoder_dim, depth=sae_args.encoder_depth, heads=sae_args.heads, mlp_dim=sae_args.mlp_dim, ) self.sae = SAE( encoder=encoder, n_slides=len(self.learner_data.train_ds.section_ids), dtypes=self.learner_data.dtypes, dtype_to_n_channels=self.learner_data.dtype_to_n_channels, codebook_dim=self.sae_args.codebook_dim, dtype_to_decoder_dims=self.sae_args.dtype_to_decoder_dims, recon_scaler=sae_args.recon_scaler, neigh_scaler=sae_args.neigh_scaler, total_steps=total_steps ) self.outputs = None def _flatten_outputs(self, outputs): ds = self.learner_data.inference_ds flat_dtypes = [ds.section_ids[sid][1] for sid, *_ in ds.idx_to_coord] n_levels = len(outputs[0]['outputs']['level_to_encoded']) batch_size = 
len(outputs[0]['outputs']['level_to_encoded']) flat = {} for k in ['encoded_tokens_prequant']: flat[k] = torch.concat([x['outputs'][k][:, 2:] for x in outputs])# skip slide and dtype token for k in ['level_to_encoded', 'cluster_probs', 'clusters']: for level in range(n_levels): flat[f'{k}_{level}'] = torch.concat([x['outputs'][k][level] for x in outputs]) k = 'dtype_to_true_pixels' pool = [v for v in flat_dtypes] flat['true_pixels'] = [] spot = 0 for i, x in enumerate(outputs): batch_size = len(x['outputs']['encoded_tokens_prequant']) dtypes = pool[spot:spot + batch_size] spot += batch_size dtype_to_idx = {dtype:0 for dtype in sorted(set(dtypes))} for dtype in dtypes: idx = dtype_to_idx[dtype] obj = x['outputs'][k][dtype][idx] flat['true_pixels'].append(obj) dtype_to_idx[dtype] += 1 k = 'dtype_to_pred_pixels' # refactor this for level in range(n_levels): pool = [v for v in flat_dtypes] flat[f'pred_pixels_{level}'] = [] spot = 0 for i, x in enumerate(outputs): batch_size = len(x['outputs']['encoded_tokens_prequant']) dtypes = pool[spot:spot + batch_size] spot += batch_size dtype_to_idx = {dtype:0 for dtype in sorted(set(dtypes))} for dtype in dtypes: idx = dtype_to_idx[dtype] obj = x['outputs'][k][dtype][level, idx] flat[f'pred_pixels_{level}'].append(obj) dtype_to_idx[dtype] += 1 return flat def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) return optimizer def training_step(self, batch, batch_idx): tiles, slides, dtypes = batch['tiles'], batch['slides'], batch['dtypes'] pairs, is_anchor = batch['pairs'], batch['is_anchor'] outs = self.forward(tiles, slides, dtypes, pairs=pairs, is_anchor=is_anchor) outs['neigh_scaler'] = self.sae.variable_neigh_scaler.get_scaler() batch_size = sum([v.shape[1] for k, v in outs['outputs']['dtype_to_true_pixels'].items()]) // 2 self.log_dict({f'{k}_step':v for k, v in outs.items() if k!='outputs'}, on_step=True, on_epoch=False, prog_bar=True, batch_size=batch_size) 
self.log_dict({f'{k}_epoch':v for k, v in outs.items() if k!='outputs'}, on_step=False, on_epoch=True, prog_bar=True, batch_size=batch_size) return outs def predict_step(self, batch): tiles, slides, dtypes = batch['tiles'], batch['slides'], batch['dtypes'] return self.forward(tiles, slides, dtypes) def format_prediction_outputs(self, outputs): ds = self.learner_data.inference_ds n_levels = len(outputs[0]['outputs']['level_to_encoded']) flat = self._flatten_outputs(outputs) clusters = [torch.stack( [ds.section_from_tiles( flat[f'clusters_{level}'].unsqueeze(-1), i ).squeeze(-1) for i in range(len(ds.section_ids))] ).to(torch.long) for level in range(n_levels)] cluster_probs = [torch.stack( [ds.section_from_tiles( flat[f'cluster_probs_{level}'], i ) for i in range(len(ds.section_ids))] ) for level in range(n_levels)] pred_pixels = [ [ds.section_from_tiles( flat[f'pred_pixels_{level}'], i ) for i in range(len(ds.section_ids))] for level in range(n_levels)] true_pixels = [ds.section_from_tiles( flat['true_pixels'], i ) for i in range(len(ds.section_ids))] relabeled_clusters, label_to_orig = zip(*[utils.label_agg_clusters(clusters[:i + 1]) for i in range(len(clusters))]) return { 'predicted_pixels': pred_pixels, # nested list of (h, w, c), length num levels, length num sections 'true_pixels': true_pixels, # list of (h, w, c), length num sections 'clusters': relabeled_clusters, # list of (n, h, w), length num levels 'cluster_probs': cluster_probs, # list of (n, h, w, n_clusters), length num levels 'agg_clusters': clusters, # list of (n h w), length num levels 'label_to_original': label_to_orig # maps labels to original cluster levels } def forward(self, tiles, slides, dtypes, pairs=None, is_anchor=None): losses, outputs = self.sae(tiles, slides, dtypes, pairs=pairs, is_anchor=is_anchor) if 'overall_loss' in losses: losses['loss'] = losses['overall_loss'] losses['outputs'] = outputs return losses
Python