Dataset column summary: signature (string, length 8 – 3.44k), body (string, length 0 – 1.41M), docstring (string, length 1 – 122k), id (string, length 5 – 17).
def perform_exe_expansion(self):
<EOL>if self.has_section('<STR_LIT>'):<EOL><INDENT>for option, value in self.items('<STR_LIT>'):<EOL><INDENT>newStr = self.interpolate_exe(value)<EOL>if newStr != value:<EOL><INDENT>self.set('<STR_LIT>', option, newStr)<EOL><DEDENT><DEDENT><DEDENT>
This function will look through the [executables] section of the ConfigParser object and replace any values using macros with full paths. Any value of the form ${which:lalapps_tmpltbank} is replaced with the equivalent of which(lalapps_tmpltbank); all other values are left unchanged.
f16001:c0:m3
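The ${which:...} macro expansion described above can be illustrated with a small standalone sketch (not the PyCBC implementation; it uses Python 3's shutil.which as a stand-in for the which() lookup):

import re
import shutil

def expand_which_macro(value):
    # Replace a value of the form ${which:exe_name} with the full path to
    # exe_name on the current PATH; leave every other value unchanged.
    match = re.match(r'^\$\{which:(.+)\}$', value)
    if match is None:
        return value
    exe_path = shutil.which(match.group(1))
    if exe_path is None:
        raise ValueError("executable %s not found in PATH" % match.group(1))
    return exe_path

# expand_which_macro('${which:ls}') would return e.g. '/usr/bin/ls'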
def get_subsections(self, section_name):
<EOL>subsections = [sec[len(section_name)+<NUM_LIT:1>:] for sec in self.sections()if sec.startswith(section_name + '<STR_LIT:->')]<EOL>for sec in subsections:<EOL><INDENT>sp = sec.split('<STR_LIT:->')<EOL>if (len(sp) > <NUM_LIT:1>) and not self.has_section('<STR_LIT>' % (section_name,<EOL>sp[<NUM_LIT:0>])):<EOL><INDENT>raise ValueError( "<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>" % (sec, sp[<NUM_LIT:0>], sp[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>if len(subsections) > <NUM_LIT:0>:<EOL><INDENT>return [sec.split('<STR_LIT:->')[<NUM_LIT:0>] for sec in subsections]<EOL><DEDENT>elif self.has_section(section_name):<EOL><INDENT>return ['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT>
Return a list of subsections for the given section name
f16001:c0:m5
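The section naming convention handled above can be seen in a short sketch with hypothetical section names: a section [tmpltbank] with subsections [tmpltbank-bns] and [tmpltbank-nsbh] yields the subsection names 'bns' and 'nsbh'.

section_name = 'tmpltbank'
sections = ['tmpltbank', 'tmpltbank-bns', 'tmpltbank-nsbh', 'inspiral']
subsections = [sec[len(section_name) + 1:] for sec in sections
               if sec.startswith(section_name + '-')]
print(subsections)   # ['bns', 'nsbh']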
def perform_extended_interpolation(self):
<EOL>for section in self.sections():<EOL><INDENT>for option,value in self.items(section):<EOL><INDENT>newStr = self.interpolate_string(option, section)<EOL>if newStr != option:<EOL><INDENT>self.set(section,newStr,value)<EOL>self.remove_option(section,option)<EOL><DEDENT>newStr = self.interpolate_string(value, section)<EOL>if newStr != value:<EOL><INDENT>self.set(section,option,newStr)<EOL><DEDENT><DEDENT><DEDENT>
Filter through an ini file and replace all instances of ExtendedInterpolation formatting with the literal value. A value like ${example} is replaced with the value of the option called example ***in the same section***. A value like ${common|example} is replaced with the value of the option example in the section [common]. Note that the Python 3 config parser writes this as ${common:example}, but Python 2.7 interprets the ':' the same as an '=' and this breaks things. Nested interpolation is not supported here.
f16001:c0:m6
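For comparison, a runnable sketch of the two interpolation forms using Python 3's built-in ExtendedInterpolation, which writes the cross-section form as ${section:option} rather than the ${section|option} form described above (the option names are hypothetical):

import configparser
import io

ini_text = """
[common]
sample-rate = 4096

[inspiral]
segment-length = 256
pad = ${segment-length}
rate = ${common:sample-rate}
"""

cp = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
cp.read_file(io.StringIO(ini_text))
print(cp.get('inspiral', 'pad'))    # '256'  (same-section reference)
print(cp.get('inspiral', 'rate'))   # '4096' (cross-section reference)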
def split_multi_sections(self):
<EOL>for section in self.sections():<EOL><INDENT>if '<STR_LIT:&>' not in section:<EOL><INDENT>continue<EOL><DEDENT>splitSections = section.split('<STR_LIT:&>')<EOL>for newSec in splitSections:<EOL><INDENT>if not self.has_section(newSec):<EOL><INDENT>self.add_section(newSec)<EOL><DEDENT>self.add_options_to_section(newSec, self.items(section))<EOL><DEDENT>self.remove_section(section)<EOL><DEDENT>
Parse through the WorkflowConfigParser instance and split any sections labelled with an "&" sign (e.g. [inspiral&tmpltbank]) into [inspiral] and [tmpltbank] sections. If these individual sections already exist they will be appended to. If an option exists in both the [inspiral] and [inspiral&tmpltbank] sections an error will be thrown.
f16001:c0:m8
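A minimal sketch of the "&" splitting described above, using a plain ConfigParser and hypothetical option names (the duplicate-option check performed by the real method is omitted for brevity):

import configparser
import io

ini_text = """
[inspiral&tmpltbank]
low-frequency-cutoff = 30

[inspiral]
snr-threshold = 5.5
"""

cp = configparser.ConfigParser()
cp.read_file(io.StringIO(ini_text))
for section in list(cp.sections()):
    if '&' not in section:
        continue
    for new_sec in section.split('&'):
        if not cp.has_section(new_sec):
            cp.add_section(new_sec)
        for opt, val in cp.items(section):
            cp.set(new_sec, opt, val)
    cp.remove_section(section)

print(cp.items('tmpltbank'))   # [('low-frequency-cutoff', '30')]
print(cp.items('inspiral'))    # now holds both snr-threshold and low-frequency-cutoff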
def populate_shared_sections(self):
if not self.has_section('<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT>for key, value in self.items('<STR_LIT>'):<EOL><INDENT>assert(self.has_section('<STR_LIT>' %(key)))<EOL>values = value.split('<STR_LIT:U+002C>')<EOL>common_options = self.items('<STR_LIT>' %(key))<EOL>for section in values:<EOL><INDENT>if not self.has_section(section):<EOL><INDENT>self.add_section(section)<EOL><DEDENT>for arg, val in common_options:<EOL><INDENT>if arg in self.options(section):<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>' %(section,) +'<STR_LIT>'%(arg,'<STR_LIT>' %(key)))<EOL><DEDENT>self.set(section, arg, val)<EOL><DEDENT><DEDENT>self.remove_section('<STR_LIT>' %(key))<EOL><DEDENT>self.remove_section('<STR_LIT>')<EOL>
Parse the [sharedoptions] section of the ini file. That section should contain entries such as: massparams = inspiral, tmpltbank and dataparams = tmpltbank. This results in all options in [sharedoptions-massparams] being copied into the [inspiral] and [tmpltbank] sections and the options in [sharedoptions-dataparams] being copied into [tmpltbank]. In the case of duplicates an error will be raised.
f16001:c0:m9
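A runnable sketch of the [sharedoptions] convention described above, with hypothetical option names (the duplicate-option error handling of the real method is omitted):

import configparser
import io

shared_ini = """
[sharedoptions]
massparams = inspiral, tmpltbank
dataparams = tmpltbank

[sharedoptions-massparams]
min-mass1 = 1.0
max-mass1 = 3.0

[sharedoptions-dataparams]
sample-rate = 4096
"""

cp = configparser.ConfigParser()
cp.read_file(io.StringIO(shared_ini))
for key, value in cp.items('sharedoptions'):
    for target in [s.strip() for s in value.split(',')]:
        if not cp.has_section(target):
            cp.add_section(target)
        for opt, val in cp.items('sharedoptions-%s' % key):
            cp.set(target, opt, val)

# [tmpltbank] now holds min-mass1, max-mass1 and sample-rate;
# [inspiral] holds min-mass1 and max-mass1.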
def add_options_to_section(self, section, items, overwrite_options=False):
<EOL>if not self.has_section(section):<EOL><INDENT>raise ValueError('<STR_LIT>'%(section,))<EOL><DEDENT>for option,value in items:<EOL><INDENT>if not overwrite_options:<EOL><INDENT>if option in self.options(section):<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>' %(section,) +'<STR_LIT>' %(option,))<EOL><DEDENT><DEDENT>self.set(section,option,value)<EOL><DEDENT>
Add a set of options and values to a section of a ConfigParser object. Will throw an error if any of the options being added already exist; this behaviour can be overridden if desired. Parameters ---------- section : string The name of the section to add options+values to items : list of tuples Each tuple contains (at [0]) the option and (at [1]) the value to add to the section of the ini file overwrite_options : Boolean, optional By default this function will throw a ValueError if an option exists in both the original section in the ConfigParser *and* in the provided items. Setting this to True overrides that behaviour so that the options+values given in items replace the original values. Default = False
f16001:c0:m10
def sanity_check_subsections(self):
<EOL>for section in self.sections():<EOL><INDENT>if section == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>for section2 in self.sections():<EOL><INDENT>if section2.startswith(section + '<STR_LIT:->'):<EOL><INDENT>self.check_duplicate_options(section, section2,<EOL>raise_error=True)<EOL><DEDENT><DEDENT><DEDENT>
This function goes through the ConfigParser and checks that any options given in the [SECTION_NAME] section are not also given in any [SECTION_NAME-SUBSECTION] sections.
f16001:c0:m11
def check_duplicate_options(self, section1, section2, raise_error=False):
<EOL>if not self.has_section(section1):<EOL><INDENT>raise ValueError('<STR_LIT>'%(section1,) )<EOL><DEDENT>if not self.has_section(section2):<EOL><INDENT>raise ValueError('<STR_LIT>'%(section2,) )<EOL><DEDENT>items1 = self.options(section1)<EOL>items2 = self.options(section2)<EOL>duplicates = [x for x in items1 if x in items2]<EOL>if duplicates and raise_error:<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>'%(section1,section2,'<STR_LIT:U+0020>'.join(duplicates)))<EOL><DEDENT>return duplicates<EOL>
Check for duplicate options in two sections, section1 and section2. Will return a list of the duplicate options. Parameters ---------- section1 : string The name of the first section to compare section2 : string The name of the second section to compare raise_error : Boolean, optional (default=False) If True, raise an error if duplicates are present. Returns ---------- duplicates : List List of duplicate options
f16001:c0:m12
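A standalone sketch of the duplicate-option check that sanity_check_subsections and check_duplicate_options perform, with hypothetical section and option names (the raise demonstrates the error path):

import configparser
import io

ini_text = """
[inspiral]
segment-length = 256

[inspiral-bns]
segment-length = 512
"""

cp = configparser.ConfigParser()
cp.read_file(io.StringIO(ini_text))
duplicates = [opt for opt in cp.options('inspiral')
              if opt in cp.options('inspiral-bns')]
if duplicates:
    raise ValueError('Option(s) %s appear in both [inspiral] and '
                     '[inspiral-bns]' % ' '.join(duplicates))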
def get_opt_tag(self, section, option, tag):
return self.get_opt_tags(section, option, [tag])<EOL>
Convenience function accessing get_opt_tags() for a single tag: see documentation for that function. NB calling get_opt_tags() directly is preferred for simplicity. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tag : string The name of the subsection to look in, if not found in [section] Returns -------- string The value of the options being searched for
f16001:c0:m13
def get_opt_tags(self, section, option, tags):
<EOL>if tags:<EOL><INDENT>tags = [tag.lower() for tag in tags if tag is not None]<EOL><DEDENT>try:<EOL><INDENT>return self.get(section, option)<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>err_string = "<STR_LIT>" %(option,section)<EOL>if not tags:<EOL><INDENT>raise ConfigParser.Error(err_string + "<STR_LIT:.>")<EOL><DEDENT>return_vals = []<EOL>sub_section_list = []<EOL>for sec_len in range(<NUM_LIT:1>, len(tags)+<NUM_LIT:1>):<EOL><INDENT>for tag_permutation in itertools.permutations(tags, sec_len):<EOL><INDENT>joined_name = '<STR_LIT:->'.join(tag_permutation)<EOL>sub_section_list.append(joined_name)<EOL><DEDENT><DEDENT>section_list = ["<STR_LIT>" %(section, sb) for sb in sub_section_list]<EOL>err_section_list = []<EOL>for sub in sub_section_list:<EOL><INDENT>if self.has_section('<STR_LIT>' %(section, sub)):<EOL><INDENT>if self.has_option('<STR_LIT>' %(section, sub), option):<EOL><INDENT>err_section_list.append("<STR_LIT>" %(section, sub))<EOL>return_vals.append(self.get('<STR_LIT>' %(section, sub),<EOL>option))<EOL><DEDENT><DEDENT><DEDENT>if not return_vals:<EOL><INDENT>err_string += "<STR_LIT>"%("<STR_LIT>".join(section_list))<EOL>raise ConfigParser.Error(err_string)<EOL><DEDENT>if len(return_vals) > <NUM_LIT:1>:<EOL><INDENT>err_string += "<STR_LIT>"%("<STR_LIT>".join(err_section_list))<EOL>raise ConfigParser.Error(err_string)<EOL><DEDENT>return return_vals[<NUM_LIT:0>]<EOL><DEDENT>
Supplement to ConfigParser.ConfigParser.get(). This will search for an option in [section] and if it doesn't find it will also try in [section-tag] for every value of tag in tags. Will raise a ConfigParser.Error if it cannot find a value. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tags : list of strings The name of subsections to look in, if not found in [section] Returns -------- string The value of the options being searched for
f16001:c0:m14
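The tag fall-through described above can be sketched for the single-tag case with a plain ConfigParser (hypothetical section and option names; the real method also tries every permutation of multiple tags):

import configparser
import io

ini_text = """
[inspiral]
snr-threshold = 5.5

[inspiral-bns]
low-frequency-cutoff = 30
"""

cp = configparser.ConfigParser()
cp.read_file(io.StringIO(ini_text))

def get_opt_tag_sketch(cp, section, option, tag):
    # Look in [section] first, then fall back to [section-tag].
    if cp.has_option(section, option):
        return cp.get(section, option)
    return cp.get('%s-%s' % (section, tag), option)

print(get_opt_tag_sketch(cp, 'inspiral', 'snr-threshold', 'bns'))         # 5.5
print(get_opt_tag_sketch(cp, 'inspiral', 'low-frequency-cutoff', 'bns'))  # 30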
def has_option_tag(self, section, option, tag):
return self.has_option_tags(section, option, [tag])<EOL>
Convenience function accessing has_option_tags() for a single tag: see documentation for that function. NB calling has_option_tags() directly is preferred for simplicity. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tag : string The name of the subsection to look in, if not found in [section] Returns -------- Boolean Is the option in the section or [section-tag]
f16001:c0:m15
def has_option_tags(self, section, option, tags):
try:<EOL><INDENT>self.get_opt_tags(section, option, tags)<EOL>return True<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>return False<EOL><DEDENT>
Supplement to ConfigParser.ConfigParser.has_option(). This will search for an option in [section] and if it doesn't find it will also try in [section-tag] for each value in tags. Returns True if the option is found and false if not. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tags : list of strings The names of the subsection to look in, if not found in [section] Returns -------- Boolean Is the option in the section or [section-tag] (for tag in tags)
f16001:c0:m16
@staticmethod<EOL><INDENT>def add_config_opts_to_parser(parser):<DEDENT>
parser.add_argument("<STR_LIT>", type=str, nargs="<STR_LIT:+>",<EOL>required=True,<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>", type=str, nargs="<STR_LIT:+>",<EOL>default=None, metavar="<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>")<EOL>
Adds options for configuration files to the given parser.
f16001:c0:m17
@classmethod<EOL><INDENT>def from_cli(cls, opts):<DEDENT>
<EOL>logging.info("<STR_LIT>")<EOL>if opts.config_overrides is not None:<EOL><INDENT>overrides = [override.split("<STR_LIT::>")<EOL>for override in opts.config_overrides]<EOL><DEDENT>else:<EOL><INDENT>overrides = None<EOL><DEDENT>if opts.config_delete is not None:<EOL><INDENT>deletes = [delete.split("<STR_LIT::>") for delete in opts.config_delete]<EOL><DEDENT>else:<EOL><INDENT>deletes = None<EOL><DEDENT>return cls(opts.config_files, overrides, deleteTuples=deletes)<EOL>
Loads a config file from the given options, with overrides and deletes applied.
f16001:c0:m18
def setup_tmpltbank_workflow(workflow, science_segs, datafind_outs,<EOL>output_dir=None, psd_files=None, tags=None,<EOL>return_format=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>make_analysis_dir(output_dir)<EOL>cp = workflow.cp<EOL>tmpltbankMethod = cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",<EOL>tags)<EOL>if tmpltbankMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>tmplt_banks = setup_tmpltbank_pregenerated(workflow, tags=tags)<EOL><DEDENT>elif tmpltbankMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>if not cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg = "<STR_LIT>"<EOL>logging.warn(errMsg)<EOL><DEDENT>linkToMatchedfltr = True<EOL><DEDENT>else:<EOL><INDENT>linkToMatchedfltr = False<EOL><DEDENT>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>if not linkToMatchedfltr:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>if not cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>compatibility_mode = True<EOL><DEDENT>else:<EOL><INDENT>compatibility_mode = False<EOL><DEDENT>tmplt_banks = setup_tmpltbank_dax_generated(workflow, science_segs,<EOL>datafind_outs, output_dir, tags=tags,<EOL>link_to_matchedfltr=linkToMatchedfltr,<EOL>compatibility_mode=compatibility_mode,<EOL>psd_files=psd_files)<EOL><DEDENT>elif tmpltbankMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,<EOL>tags=tags, independent_ifos=True,<EOL>psd_files=psd_files)<EOL><DEDENT>elif tmpltbankMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,<EOL>tags=tags, independent_ifos=False,<EOL>psd_files=psd_files)<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>tmplt_bank_filename=tmplt_banks[<NUM_LIT:0>].name<EOL>ext = tmplt_bank_filename.split('<STR_LIT:.>', <NUM_LIT:1>)[<NUM_LIT:1>]<EOL>logging.info("<STR_LIT>", ext)<EOL>if return_format is None :<EOL><INDENT>tmplt_banks_return = tmplt_banks<EOL><DEDENT>elif return_format in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if ext in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>') or ext in ('<STR_LIT>' , '<STR_LIT>'):<EOL><INDENT>tmplt_banks_return = pycbc.workflow.convert_bank_to_hdf(workflow,<EOL>tmplt_banks, "<STR_LIT>")<EOL><DEDENT><DEDENT>else :<EOL><INDENT>if ext == return_format:<EOL><INDENT>tmplt_banks_return = tmplt_banks<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>".format(ext, return_format))<EOL><DEDENT><DEDENT>logging.info("<STR_LIT>")<EOL>return tmplt_banks_return<EOL>
Setup template bank section of CBC workflow. This function is responsible for deciding which of the various template bank workflow generation utilities should be used. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. science_segs : Keyed dictionary of glue.segmentlist objects scienceSegs[ifo] holds the science segments to be analysed for each ifo. datafind_outs : pycbc.workflow.core.FileList The file list containing the datafind files. output_dir : path string The directory where data products will be placed. psd_files : pycbc.workflow.core.FileList The file list containing predefined PSDs, if provided. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- tmplt_banks : pycbc.workflow.core.FileList The FileList holding the details of all the template bank jobs.
f16003:m0
def setup_tmpltbank_dax_generated(workflow, science_segs, datafind_outs,<EOL>output_dir, tags=None,<EOL>link_to_matchedfltr=True,<EOL>compatibility_mode=False,<EOL>psd_files=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>cp = workflow.cp<EOL>ifos = science_segs.keys()<EOL>tmplt_bank_exe = os.path.basename(cp.get('<STR_LIT>', '<STR_LIT>'))<EOL>exe_class = select_tmpltbank_class(tmplt_bank_exe)<EOL>if link_to_matchedfltr:<EOL><INDENT>tmpltbank_exe = os.path.basename(cp.get('<STR_LIT>', '<STR_LIT>'))<EOL>link_exe_instance = select_matchedfilter_class(tmpltbank_exe)<EOL><DEDENT>else:<EOL><INDENT>link_exe_instance = None<EOL><DEDENT>tmplt_banks = FileList([])<EOL>for ifo in ifos:<EOL><INDENT>job_instance = exe_class(workflow.cp, '<STR_LIT>', ifo=ifo,<EOL>out_dir=output_dir,<EOL>tags=tags)<EOL>if cp.has_option_tags("<STR_LIT>", "<STR_LIT>", tags):<EOL><INDENT>job_instance.write_psd = True<EOL><DEDENT>else:<EOL><INDENT>job_instance.write_psd = False<EOL><DEDENT>if link_exe_instance:<EOL><INDENT>link_job_instance = link_exe_instance(cp, '<STR_LIT>', ifo=ifo,<EOL>out_dir=output_dir, tags=tags)<EOL><DEDENT>else:<EOL><INDENT>link_job_instance = None<EOL><DEDENT>sngl_ifo_job_setup(workflow, ifo, tmplt_banks, job_instance,<EOL>science_segs[ifo], datafind_outs,<EOL>link_job_instance=link_job_instance,<EOL>allow_overlap=True,<EOL>compatibility_mode=compatibility_mode)<EOL><DEDENT>return tmplt_banks<EOL>
Setup template bank jobs that are generated as part of the CBC workflow. This function will add numerous jobs to the CBC workflow using configuration options from the .ini file. The following executables are currently supported: * lalapps_tmpltbank * pycbc_geom_nonspin_bank Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. science_segs : Keyed dictionary of glue.segmentlist objects scienceSegs[ifo] holds the science segments to be analysed for each ifo. datafind_outs : pycbc.workflow.core.FileList The file list containing the datafind files. output_dir : path string The directory where data products will be placed. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. link_to_matchedfltr : boolean, optional (default=True) If this option is given, the job valid_times will be altered so that there will be one inspiral file for every template bank and they will cover the same time span. Note that this option must also be given during matched-filter generation to be meaningful. psd_files : pycbc.workflow.core.FileList The file list containing predefined PSDs, if provided. Returns -------- tmplt_banks : pycbc.workflow.core.FileList The FileList holding the details of all the template bank jobs.
f16003:m1
def setup_tmpltbank_without_frames(workflow, output_dir,<EOL>tags=None, independent_ifos=False,<EOL>psd_files=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>cp = workflow.cp<EOL>ifos = workflow.ifos<EOL>fullSegment = workflow.analysis_time<EOL>tmplt_bank_exe = os.path.basename(cp.get('<STR_LIT>','<STR_LIT>'))<EOL>if tmplt_bank_exe == '<STR_LIT>':<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>exe_instance = select_tmpltbank_class(tmplt_bank_exe)<EOL>tmplt_banks = FileList([])<EOL>if independent_ifos:<EOL><INDENT>ifoList = [ifo for ifo in ifos]<EOL><DEDENT>else:<EOL><INDENT>ifoList = [[ifo for ifo in ifos]]<EOL><DEDENT>if cp.has_option_tags("<STR_LIT>", "<STR_LIT>", tags):<EOL><INDENT>exe_instance.write_psd = True<EOL><DEDENT>else:<EOL><INDENT>exe_instance.write_psd = False<EOL><DEDENT>for ifo in ifoList:<EOL><INDENT>job_instance = exe_instance(workflow.cp, '<STR_LIT>', ifo=ifo,<EOL>out_dir=output_dir,<EOL>tags=tags,<EOL>psd_files=psd_files)<EOL>node = job_instance.create_nodata_node(fullSegment)<EOL>workflow.add_node(node)<EOL>tmplt_banks += node.output_files<EOL><DEDENT>return tmplt_banks<EOL>
Setup CBC workflow to use a template bank (or banks) that are generated in the workflow, but do not use the data to estimate a PSD, and therefore do not vary over the duration of the workflow. This can either generate one bank that is valid for all ifos at all times, or multiple banks that are valid only for a single ifo at all times (one bank per ifo). Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. output_dir : path string The directory where the template bank outputs will be placed. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. independent_ifos : Boolean, optional (default=False) If given this will produce one template bank per ifo. If not given there will be one template bank to cover all ifos. psd_files : pycbc.workflow.core.FileList The file list containing predefined PSDs, if provided. Returns -------- tmplt_banks : pycbc.workflow.core.FileList The FileList holding the details of the template bank(s).
f16003:m2
def setup_tmpltbank_pregenerated(workflow, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>tmplt_banks = FileList([])<EOL>cp = workflow.cp<EOL>global_seg = workflow.analysis_time<EOL>user_tag = "<STR_LIT>"<EOL>try:<EOL><INDENT>pre_gen_bank = cp.get_opt_tags('<STR_LIT>',<EOL>'<STR_LIT>', tags)<EOL>pre_gen_bank = resolve_url(pre_gen_bank)<EOL>file_url = urlparse.urljoin('<STR_LIT>', urllib.pathname2url(pre_gen_bank))<EOL>curr_file = File(workflow.ifos, user_tag, global_seg, file_url,<EOL>tags=tags)<EOL>curr_file.PFN(file_url, site='<STR_LIT>')<EOL>tmplt_banks.append(curr_file)<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>for ifo in workflow.ifos:<EOL><INDENT>try:<EOL><INDENT>pre_gen_bank = cp.get_opt_tags('<STR_LIT>',<EOL>'<STR_LIT>' % ifo.lower(),<EOL>tags)<EOL>pre_gen_bank = resolve_url(pre_gen_bank)<EOL>file_url = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(pre_gen_bank))<EOL>curr_file = File(ifo, user_tag, global_seg, file_url,<EOL>tags=tags)<EOL>curr_file.PFN(file_url, site='<STR_LIT>')<EOL>tmplt_banks.append(curr_file)<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>if tags:<EOL><INDENT>tagged_secs = "<STR_LIT:U+0020>".join("<STR_LIT>"%(ifo,) for ifo in workflow.ifos)<EOL>err_msg += "<STR_LIT>" %(tagged_secs,)<EOL><DEDENT>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>" %(ifo,)<EOL>raise ConfigParser.Error(err_msg)<EOL><DEDENT><DEDENT><DEDENT>return tmplt_banks<EOL>
Setup CBC workflow to use a pregenerated template bank. The bank given in cp.get('workflow','pregenerated-template-bank') will be used as the input file for all matched-filtering jobs. If this option is present, workflow will assume that it should be used and not generate template banks within the workflow. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- tmplt_banks : pycbc.workflow.core.FileList The FileList holding the details of the template bank.
f16003:m3
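The pregenerated bank path is converted to a file URL with urlparse.urljoin and urllib.pathname2url before being wrapped in a workflow File. A small sketch of that conversion using the Python 3 spellings of those calls (the bank filename is hypothetical):

import os
from urllib.parse import urljoin
from urllib.request import pathname2url

bank_path = os.path.abspath('H1L1-TMPLTBANK.xml.gz')   # hypothetical local file
file_url = urljoin('file:', pathname2url(bank_path))
print(file_url)   # a file: URL pointing at the absolute bank path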
def parse_workflow_ini_file(cpFile,parsed_filepath=None):
<EOL>cp = read_ini_file(cpFile)<EOL>print(cp.sections())<EOL>cp = split_multi_sections(cp)<EOL>print(cp.sections())<EOL>sanity_check_subsections(cp)<EOL>print(cp.sections())<EOL>if parsed_filepath:<EOL><INDENT>fp = open(parsed_filepath,'<STR_LIT:w>')<EOL>cp.write(fp)<EOL>fp.close()<EOL><DEDENT>return cp<EOL>
Read a .ini file in, parse it as described in the documentation linked to above, and return the parsed ini file. Parameters ---------- cpFile : The path to a .ini file to be read in parsed_filepath : string, optional If provided, the .ini file, after parsing, will be written to this location Returns ------- cp: The parsed ConfigParser class containing the read in .ini file
f16004:m0
def read_ini_file(cpFile):
<EOL>cp = ConfigParser.ConfigParser(interpolation=ConfigParser.ExtendedInterpolation())<EOL>fp = open(cpFile,'<STR_LIT:r>')<EOL>cp.read_file(fp)<EOL>fp.close()<EOL>return cp<EOL>
Read a .ini file and return it as a ConfigParser class. This function does none of the parsing/combining of sections. It simply reads the file and returns it unedited Parameters ---------- cpFile : The path to a .ini file to be read in Returns ------- cp: The ConfigParser class containing the read in .ini file
f16004:m1
def perform_extended_interpolation(cp,preserve_orig_file=False):
<EOL>if preserve_orig_file:<EOL><INDENT>cp = copy.deepcopy(cp)<EOL><DEDENT>for section in cp.sections():<EOL><INDENT>for option,value in cp.items(section):<EOL><INDENT>newStr = interpolate_string(option,cp,section)<EOL>if newStr != option:<EOL><INDENT>cp.set(section,newStr,value)<EOL>cp.remove_option(section,option)<EOL><DEDENT>newStr = interpolate_string(value,cp,section)<EOL>if newStr != value:<EOL><INDENT>cp.set(section,option,newStr)<EOL><DEDENT><DEDENT><DEDENT>return cp<EOL>
Filter through an ini file and replace all instances of ExtendedInterpolation formatting with the literal value. A value like ${example} is replaced with the value of the option called example ***in the same section***. A value like ${common|example} is replaced with the value of the option example in the section [common]. Note that the Python 3 config parser writes this as ${common:example}, but Python 2.7 interprets the ':' the same as an '=' and this breaks things. Nested interpolation is not supported here. Parameters ---------- cp: ConfigParser object preserve_orig_file: Boolean, optional By default the input ConfigParser object will be modified in place. If this is set to True, deepcopy will be used and the input will be preserved. Default = False Returns ------- cp: parsed ConfigParser object
f16004:m2
def split_multi_sections(cp,preserve_orig_file=False):
<EOL>if preserve_orig_file:<EOL><INDENT>cp = copy.deepcopy(cp)<EOL><DEDENT>for section in cp.sections():<EOL><INDENT>if '<STR_LIT:&>' not in section:<EOL><INDENT>continue<EOL><DEDENT>splitSections = section.split('<STR_LIT:&>')<EOL>for newSec in splitSections:<EOL><INDENT>if not cp.has_section(newSec):<EOL><INDENT>cp.add_section(newSec)<EOL><DEDENT>add_options_to_section(cp,newSec,cp.items(section))<EOL><DEDENT>cp.remove_section(section)<EOL><DEDENT>return cp<EOL>
Parse through a supplied ConfigParser object and split any sections labelled with an "&" sign (e.g. [inspiral&tmpltbank]) into [inspiral] and [tmpltbank] sections. If these individual sections already exist they will be appended to. If an option exists in both the [inspiral] and [inspiral&tmpltbank] sections an error will be thrown. Parameters ---------- cp: The ConfigParser class preserve_orig_file: Boolean, optional By default the input ConfigParser object will be modified in place. If this is set to True, deepcopy will be used and the input will be preserved. Default = False Returns ---------- cp: The ConfigParser class
f16004:m4
def sanity_check_subsections(cp):
<EOL>for section in cp.sections():<EOL><INDENT>for section2 in cp.sections():<EOL><INDENT>if section2.startswith(section + '<STR_LIT:->'):<EOL><INDENT>check_duplicate_options(cp,section,section2,raise_error=True)<EOL><DEDENT><DEDENT><DEDENT>
This function goes through the ConfigParser and checks that any options given in the [SECTION_NAME] section are not also given in any [SECTION_NAME-SUBSECTION] sections. Parameters ---------- cp: The ConfigParser class Returns ---------- None
f16004:m5
def add_options_to_section(cp,section,items,preserve_orig_file=False,overwrite_options=False):
<EOL>if not cp.has_section(section):<EOL><INDENT>raise ValueError('<STR_LIT>' %(section,))<EOL><DEDENT>if preserve_orig_file:<EOL><INDENT>cp = copy.deepcopy(cp)<EOL><DEDENT>for option,value in items:<EOL><INDENT>if not overwrite_options:<EOL><INDENT>if option in cp.options(section):<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>' %(option,))<EOL><DEDENT><DEDENT>cp.set(section,option,value)<EOL><DEDENT>return cp<EOL>
Add a set of options and values to a section of a ConfigParser object. Will throw an error if any of the options being added already exist; this behaviour can be overridden if desired. Parameters ---------- cp: The ConfigParser class section: string The name of the section to add options+values to items: list of tuples Each tuple contains (at [0]) the option and (at [1]) the value to add to the section of the ini file preserve_orig_file: Boolean, optional By default the input ConfigParser object will be modified in place. If this is set to True, deepcopy will be used and the input will be preserved. Default = False overwrite_options: Boolean, optional By default this function will throw a ValueError if an option exists in both the original section in the ConfigParser *and* in the provided items. Setting this to True overrides that behaviour so that the options+values given in items replace the original values. Default = False Returns ---------- cp: The ConfigParser class
f16004:m6
def check_duplicate_options(cp,section1,section2,raise_error=False):
<EOL>if not cp.has_section(section1):<EOL><INDENT>raise ValueError('<STR_LIT>'%(section1,))<EOL><DEDENT>if not cp.has_section(section2):<EOL><INDENT>raise ValueError('<STR_LIT>'%(section2,))<EOL><DEDENT>items1 = cp.items(section1)<EOL>items2 = cp.items(section2)<EOL>duplicates = [x for x in items1 if x in items2]<EOL>if duplicates and raise_error:<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>'%(section1,section2,'<STR_LIT:U+0020>'.join(map(str, duplicates))))<EOL><DEDENT>return duplicates<EOL>
Check for duplicate options in two sections, section1 and section2. Will return a list of the duplicate options. Parameters ---------- cp: The ConfigParser class section1: string The name of the first section to compare section2: string The name of the second section to compare raise_error: Boolean, optional If True, raise an error if duplicates are present. Default = False Returns ---------- duplicates: List List of duplicate options
f16004:m7
def check_output_error_and_retcode(*popenargs, **kwargs):
if '<STR_LIT>' in kwargs:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>process = subprocess.Popen(stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>*popenargs, **kwargs)<EOL>output, error = process.communicate()<EOL>retcode = process.poll()<EOL>return output, error, retcode<EOL>
This function is used to obtain the stdout of a command. It is only used internally; we recommend using the make_external_call function if you want to call external executables.
f16005:m0
def make_analysis_dir(path):
if path is not None:<EOL><INDENT>makedir(os.path.join(path, '<STR_LIT>'))<EOL><DEDENT>
Make the analysis directory path, any parent directories that don't already exist, and the 'logs' subdirectory of path.
f16005:m2
def is_condor_exec(exe_path):
if check_output(['<STR_LIT>', '<STR_LIT>', exe_path]).find('<STR_LIT>') != -<NUM_LIT:1>:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
Determine if an executable is condor-compiled Parameters ---------- exe_path : str The executable path Returns ------- truth_value : boolean Return True if the exe is condor compiled, False otherwise.
f16005:m3
def make_external_call(cmdList, out_dir=None, out_basename='<STR_LIT>',<EOL>shell=False, fail_on_error=True):
if out_dir:<EOL><INDENT>outBase = os.path.join(out_dir,out_basename)<EOL>errFile = outBase + '<STR_LIT>'<EOL>errFP = open(errFile, '<STR_LIT:w>')<EOL>outFile = outBase + '<STR_LIT>'<EOL>outFP = open(outFile, '<STR_LIT:w>')<EOL>cmdFile = outBase + '<STR_LIT>'<EOL>cmdFP = open(cmdFile, '<STR_LIT:w>')<EOL>cmdFP.write('<STR_LIT:U+0020>'.join(cmdList))<EOL>cmdFP.close()<EOL><DEDENT>else:<EOL><INDENT>errFile = None<EOL>outFile = None<EOL>cmdFile = None<EOL>errFP = None<EOL>outFP = None<EOL><DEDENT>msg = "<STR_LIT>" %('<STR_LIT:U+0020>'.join(cmdList))<EOL>logging.debug(msg)<EOL>errCode = subprocess.call(cmdList, stderr=errFP, stdout=outFP,shell=shell)<EOL>if errFP:<EOL><INDENT>errFP.close()<EOL><DEDENT>if outFP:<EOL><INDENT>outFP.close()<EOL><DEDENT>if errCode and fail_on_error:<EOL><INDENT>raise CalledProcessErrorMod(errCode, '<STR_LIT:U+0020>'.join(cmdList),<EOL>errFile=errFile, outFile=outFile, cmdFile=cmdFile)<EOL><DEDENT>logging.debug("<STR_LIT>")<EOL>
Use this to make an external call using the python subprocess module. See the subprocess documentation for more details of how this works. http://docs.python.org/2/library/subprocess.html Parameters ----------- cmdList : list of strings This list of strings contains the command to be run. See the subprocess documentation for more details. out_dir : string If given the stdout and stderr will be redirected to os.path.join(out_dir, out_basename) + '.err' and '.out' respectively. If not given the stdout and stderr will not be recorded. out_basename : string The value of out_basename used to construct the file names used to store stderr and stdout. See out_dir for more information. shell : boolean, default=False This value will be given as the shell kwarg to the subprocess call. **WARNING** See the subprocess documentation for details on this kwarg including a warning about a serious security exploit. Do not use this unless you are sure it is necessary **and** safe. fail_on_error : boolean, default=True If set to true an exception will be raised if the external command does not return a code of 0. If set to false such failures will be ignored. Stderr and stdout can be stored in either case using the out_dir and out_basename options. Returns -------- exitCode : int The code returned by the process.
f16005:m4
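A usage sketch for the call above, assuming make_external_call is importable from this module; the log directory must already exist because the function opens the log files inside it:

import os

os.makedirs('external_logs', exist_ok=True)
make_external_call(['ls', '-l'], out_dir='external_logs',
                   out_basename='list_directory', fail_on_error=True)
# external_logs/ now contains list_directory.out and list_directory.err
# (plus a file recording the command line), per the docstring convention.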
def get_full_analysis_chunk(science_segs):
extents = [science_segs[ifo].extent() for ifo in science_segs.keys()]<EOL>min, max = extents[<NUM_LIT:0>]<EOL>for lo, hi in extents:<EOL><INDENT>if min > lo:<EOL><INDENT>min = lo<EOL><DEDENT>if max < hi:<EOL><INDENT>max = hi<EOL><DEDENT><DEDENT>fullSegment = segments.segment(min, max)<EOL>return fullSegment<EOL>
Function to find the first and last time point contained in the science segments and return a single segment spanning that full time. Parameters ----------- science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. Returns -------- fullSegment : ligo.segments.segment The segment spanning the first and last time point contained in science_segs.
f16005:m5
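A worked example of the span computed above, using ligo.segments directly with hypothetical GPS times:

from ligo import segments

science_segs = {
    'H1': segments.segmentlist([segments.segment(1000000000, 1000002000)]),
    'L1': segments.segmentlist([segments.segment(1000001000, 1000003000)]),
}
extents = [science_segs[ifo].extent() for ifo in science_segs]
full_segment = segments.segment(min(lo for lo, hi in extents),
                                max(hi for lo, hi in extents))
print(full_segment)   # spans 1000000000 to 1000003000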
def get_random_label():
return '<STR_LIT>'.join(random.choice(string.ascii_uppercase + string.digits)for _ in range(<NUM_LIT:15>))<EOL>
Get a random label string to use when clustering jobs.
f16005:m6
def __init__(self, cp, name,<EOL>universe=None, ifos=None, out_dir=None, tags=None):
if isinstance(ifos, string_types):<EOL><INDENT>self.ifo_list = [ifos]<EOL><DEDENT>else:<EOL><INDENT>self.ifo_list = ifos<EOL><DEDENT>if self.ifo_list is not None:<EOL><INDENT>self.ifo_string = '<STR_LIT>'.join(self.ifo_list)<EOL><DEDENT>else:<EOL><INDENT>self.ifo_string = None<EOL><DEDENT>self.cp = cp<EOL>self.universe=universe<EOL>self.container_cls = None<EOL>self.container_type = None<EOL>try:<EOL><INDENT>self.installed = cp.getboolean('<STR_LIT>' % name, '<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>self.installed = True<EOL><DEDENT>self.name=name<EOL>self.update_current_tags(tags)<EOL>self.update_output_directory(out_dir=out_dir)<EOL>self.update_current_retention_level(self.current_retention_level)<EOL>try:<EOL><INDENT>self.container_type = cp.get('<STR_LIT>' % name,<EOL>'<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>if self.container_type is not None:<EOL><INDENT>self.container_img = cp.get('<STR_LIT>' % name,<EOL>'<STR_LIT>')<EOL>try:<EOL><INDENT>self.container_site = cp.get('<STR_LIT>' % name,<EOL>'<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>self.container_site = '<STR_LIT>'<EOL><DEDENT>try:<EOL><INDENT>self.container_mount = cp.get('<STR_LIT>' % name,<EOL>'<STR_LIT>').split('<STR_LIT:U+002C>')<EOL><DEDENT>except:<EOL><INDENT>self.container_mount = None<EOL><DEDENT>self.container_cls = Pegasus.DAX3.Container("<STR_LIT>".format(<EOL>name),<EOL>self.container_type,<EOL>self.container_img,<EOL>imagesite=self.container_site,<EOL>mount=self.container_mount)<EOL>super(Executable, self).__init__(self.tagged_name,<EOL>installed=self.installed,<EOL>container=self.container_cls)<EOL><DEDENT>else:<EOL><INDENT>super(Executable, self).__init__(self.tagged_name,<EOL>installed=self.installed)<EOL><DEDENT>self._set_pegasus_profile_options()<EOL>exe_path = cp.get('<STR_LIT>', name)<EOL>self.needs_fetching = False<EOL>exe_url = urlparse.urlparse(exe_path)<EOL>try:<EOL><INDENT>exe_site_list = cp.get('<STR_LIT>' % name, '<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>exe_site_list = '<STR_LIT>'<EOL><DEDENT>for s in exe_site_list.split('<STR_LIT:U+002C>'):<EOL><INDENT>exe_site = s.strip()<EOL>if exe_url.scheme in ['<STR_LIT>', '<STR_LIT:file>']:<EOL><INDENT>if exe_site is '<STR_LIT>':<EOL><INDENT>if os.path.isfile(exe_url.path) is False:<EOL><INDENT>raise TypeError("<STR_LIT>"<EOL>"<STR_LIT>" % (name, exe_path,<EOL>exe_site))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.needs_fetching = True<EOL><DEDENT>self.add_pfn(exe_path, site=exe_site)<EOL>logging.debug("<STR_LIT>"<EOL>"<STR_LIT>" % (name, exe_url.path, exe_site))<EOL><DEDENT>if self.universe is None:<EOL><INDENT>if is_condor_exec(exe_path):<EOL><INDENT>self.universe = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self.universe = '<STR_LIT>'<EOL><DEDENT><DEDENT>logging.debug("<STR_LIT>"<EOL>% (name, self.universe))<EOL>self.set_universe(self.universe)<EOL>if hasattr(self, "<STR_LIT>"):<EOL><INDENT>self.add_profile('<STR_LIT>', '<STR_LIT>', self.group_jobs)<EOL><DEDENT>
Initialize the Executable class. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings name : string Executable name universe : string, optional Condor universe to run the job in ifos : string or list, optional The ifo(s) that the Job is valid for. If the job is independently valid for multiple ifos it can be provided as a list, e.g. ['H1', 'L1', 'V1']; if the job is only valid for the combination of ifos (e.g. ligolw_thinca) then this can be supplied as, e.g., "H1L1V1". out_dir: path, optional The folder to store output files of this job. tags : list of strings A list of strings that is used to identify this job.
f16005:c1:m0
@property<EOL><INDENT>def ifo(self):<DEDENT>
if self.ifo_list and len(self.ifo_list) == <NUM_LIT:1>:<EOL><INDENT>return self.ifo_list[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>" %(str(self.ifo_list),)<EOL>raise TypeError(errMsg)<EOL><DEDENT>
Return the ifo. If only one ifo in the ifo list this will be that ifo. Otherwise an error is raised.
f16005:c1:m1
def add_ini_profile(self, cp, sec):
for opt in cp.options(sec):<EOL><INDENT>namespace = opt.split('<STR_LIT:|>')[<NUM_LIT:0>]<EOL>if namespace == '<STR_LIT>' or namespace == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>value = string.strip(cp.get(sec, opt))<EOL>key = opt.split('<STR_LIT:|>')[<NUM_LIT:1>]<EOL>self.add_profile(namespace, key, value, force=True)<EOL>if namespace == '<STR_LIT>' and key == '<STR_LIT>':<EOL><INDENT>self.execution_site = value<EOL><DEDENT><DEDENT>
Add profile from configuration file. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings sec : string The section containing options for this job.
f16005:c1:m2
def add_ini_opts(self, cp, sec):
for opt in cp.options(sec):<EOL><INDENT>value = string.strip(cp.get(sec, opt))<EOL>opt = '<STR_LIT>' %(opt,)<EOL>if opt in self.file_input_options:<EOL><INDENT>values = [path for path in value.split('<STR_LIT:U+0020>') if path]<EOL>self.common_raw_options.append(opt)<EOL>self.common_raw_options.append('<STR_LIT:U+0020>')<EOL>for path in values:<EOL><INDENT>split_path = path.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>if len(split_path) == <NUM_LIT:1>:<EOL><INDENT>ifo = None<EOL>path = path<EOL><DEDENT>else:<EOL><INDENT>if split_path[<NUM_LIT:1>].startswith('<STR_LIT>'):<EOL><INDENT>ifo = None<EOL>path = path<EOL><DEDENT>else:<EOL><INDENT>ifo = split_path[<NUM_LIT:0>]<EOL>path = split_path[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>curr_lfn = os.path.basename(path)<EOL>if os.path.isfile(path):<EOL><INDENT>curr_pfn = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(<EOL>os.path.abspath(path)))<EOL><DEDENT>else:<EOL><INDENT>curr_pfn = path<EOL><DEDENT>if curr_lfn in file_input_from_config_dict.keys():<EOL><INDENT>file_pfn = file_input_from_config_dict[curr_lfn][<NUM_LIT:2>]<EOL>assert(file_pfn == curr_pfn)<EOL>curr_file = file_input_from_config_dict[curr_lfn][<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>local_file_path = resolve_url(curr_pfn)<EOL>curr_file = File.from_path(local_file_path)<EOL>tuple_val = (local_file_path, curr_file, curr_pfn)<EOL>file_input_from_config_dict[curr_lfn] = tuple_val<EOL><DEDENT>self.common_input_files.append(curr_file)<EOL>if ifo:<EOL><INDENT>self.common_raw_options.append(ifo + '<STR_LIT::>')<EOL>self.common_raw_options.append(curr_file.dax_repr)<EOL><DEDENT>else:<EOL><INDENT>self.common_raw_options.append(curr_file.dax_repr)<EOL><DEDENT>self.common_raw_options.append('<STR_LIT:U+0020>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.common_options += [opt, value]<EOL><DEDENT><DEDENT>
Add job-specific options from configuration file. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings sec : string The section containing options for this job.
f16005:c1:m3
def add_opt(self, opt, value=None):
if value is None:<EOL><INDENT>self.common_options += [opt]<EOL><DEDENT>else:<EOL><INDENT>self.common_options += [opt, value]<EOL><DEDENT>
Add option to job. Parameters ----------- opt : string Name of option (e.g. --output-file-format) value : string, (default=None) The value for the option (no value if set to None).
f16005:c1:m4
def get_opt(self, opt):
for sec in self.sections:<EOL><INDENT>try:<EOL><INDENT>key = self.cp.get(sec, opt)<EOL>if key:<EOL><INDENT>return key<EOL><DEDENT><DEDENT>except ConfigParser.NoOptionError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>
Get value of option from configuration file Parameters ----------- opt : string Name of option (e.g. output-file-format) Returns -------- value : string The value for the option. Returns None if option not present.
f16005:c1:m5
def has_opt(self, opt):
for sec in self.sections:<EOL><INDENT>val = self.cp.has_option(sec, opt)<EOL>if val:<EOL><INDENT>return val<EOL><DEDENT><DEDENT>return False<EOL>
Check if option is present in configuration file Parameters ----------- opt : string Name of option (e.g. output-file-format)
f16005:c1:m6
def create_node(self):
return Node(self)<EOL>
Default node constructor. This is usually overridden by subclasses of Executable.
f16005:c1:m7
def update_current_retention_level(self, value):
<EOL>self.current_retention_level = value<EOL>try:<EOL><INDENT>global_retention_level =self.cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",<EOL>self.tags+[self.name])<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>msg="<STR_LIT>"<EOL>msg+="<STR_LIT>"<EOL>msg+="<STR_LIT>"<EOL>logging.warn(msg)<EOL>self.retain_files = True<EOL>self.global_retention_threshold = <NUM_LIT:1><EOL>self.cp.set("<STR_LIT>", "<STR_LIT>", "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>retention_choices = {<EOL>'<STR_LIT>' : <NUM_LIT:1>,<EOL>'<STR_LIT>' : <NUM_LIT:2>,<EOL>'<STR_LIT>' : <NUM_LIT:3>,<EOL>'<STR_LIT>' : <NUM_LIT:4><EOL>}<EOL>try:<EOL><INDENT>self.global_retention_threshold =retention_choices[global_retention_level]<EOL><DEDENT>except KeyError:<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>".format(global_retention_level)<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>raise ValueError(err_msg)<EOL><DEDENT>if self.current_retention_level == <NUM_LIT:5>:<EOL><INDENT>self.retain_files = True<EOL>if type(self).__name__ in Executable._warned_classes_list:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>warn_msg = "<STR_LIT>"<EOL>warn_msg += "<STR_LIT>".format(type(self))<EOL>warn_msg += "<STR_LIT>"<EOL>warn_msg += "<STR_LIT>"<EOL>logging.warn(warn_msg)<EOL>Executable._warned_classes_list.append(type(self).__name__)<EOL><DEDENT><DEDENT>elif self.global_retention_threshold > self.current_retention_level:<EOL><INDENT>self.retain_files = False<EOL><DEDENT>else:<EOL><INDENT>self.retain_files = True<EOL><DEDENT><DEDENT>
Set a new value for the current retention level. This updates the value of self.retain_files for an updated value of the retention level. Parameters ----------- value : int The new value to use for the retention level.
f16005:c1:m8
def update_current_tags(self, tags):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>tags = [tag.upper() for tag in tags]<EOL>self.tags = tags<EOL>if len(tags) > <NUM_LIT:6>:<EOL><INDENT>warn_msg = "<STR_LIT>"<EOL>warn_msg += "<STR_LIT>".format('<STR_LIT:U+0020>'.join(tags))<EOL>warn_msg += "<STR_LIT>".format(self.name)<EOL>logging.info(warn_msg)<EOL><DEDENT>if len(tags) != <NUM_LIT:0>:<EOL><INDENT>self.tagged_name = "<STR_LIT>".format(self.name, '<STR_LIT:_>'.join(tags))<EOL><DEDENT>else:<EOL><INDENT>self.tagged_name = self.name<EOL><DEDENT>if self.ifo_string is not None:<EOL><INDENT>self.tagged_name = "<STR_LIT>".format(self.tagged_name,<EOL>self.ifo_string)<EOL><DEDENT>sections = [self.name]<EOL>if self.ifo_list is not None:<EOL><INDENT>if len(self.ifo_list) > <NUM_LIT:1>:<EOL><INDENT>sec_tags = tags + self.ifo_list + [self.ifo_string]<EOL><DEDENT>else:<EOL><INDENT>sec_tags = tags + self.ifo_list<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sec_tags = tags<EOL><DEDENT>for sec_len in range(<NUM_LIT:1>, len(sec_tags)+<NUM_LIT:1>):<EOL><INDENT>for tag_permutation in permutations(sec_tags, sec_len):<EOL><INDENT>joined_name = '<STR_LIT:->'.join(tag_permutation)<EOL>section = '<STR_LIT>'.format(self.name, joined_name.lower())<EOL>if self.cp.has_section(section):<EOL><INDENT>sections.append(section)<EOL><DEDENT><DEDENT><DEDENT>self.sections = sections<EOL>for sec1, sec2 in combinations(sections, <NUM_LIT:2>):<EOL><INDENT>self.cp.check_duplicate_options(sec1, sec2, raise_error=True)<EOL><DEDENT>self.common_options = []<EOL>self.common_raw_options = []<EOL>self.common_input_files = []<EOL>for sec in sections:<EOL><INDENT>if self.cp.has_section(sec):<EOL><INDENT>self.add_ini_opts(self.cp, sec)<EOL><DEDENT>else:<EOL><INDENT>warn_string = "<STR_LIT>"<EOL>warn_string += "<STR_LIT>".format(sec)<EOL>logging.warn(warn_string)<EOL><DEDENT><DEDENT>
Set a new set of tags for this executable. Update the set of tags that this job will use. This updates the default file naming and shared options. It will *not* update the pegasus profile, which belongs to the executable and cannot be different for different nodes. Parameters ----------- tags : list The new list of tags to consider.
f16005:c1:m9
def update_output_directory(self, out_dir=None):
<EOL>if out_dir is not None:<EOL><INDENT>self.out_dir = out_dir<EOL><DEDENT>elif len(self.tags) == <NUM_LIT:0>:<EOL><INDENT>self.out_dir = self.name<EOL><DEDENT>else:<EOL><INDENT>self.out_dir = self.tagged_name<EOL><DEDENT>if not os.path.isabs(self.out_dir):<EOL><INDENT>self.out_dir = os.path.join(os.getcwd(), self.out_dir)<EOL><DEDENT>
Update the default output directory for output files. Parameters ----------- out_dir : string (optional, default=None) If provided use this as the output directory. Else choose this automatically from the tags.
f16005:c1:m10
def _set_pegasus_profile_options(self):
<EOL>if self.cp.has_section('<STR_LIT>'):<EOL><INDENT>self.add_ini_profile(self.cp, '<STR_LIT>')<EOL><DEDENT>for sec in self.sections:<EOL><INDENT>if self.cp.has_section('<STR_LIT>'.format(sec)):<EOL><INDENT>self.add_ini_profile(self.cp,<EOL>'<STR_LIT>'.format(sec))<EOL><DEDENT><DEDENT>
Set the pegasus-profile settings for this Executable. These are a property of the Executable and not of nodes that it will spawn. Therefore it *cannot* be updated without also changing values for nodes that might already have been created. Therefore this is only called once in __init__. Second calls to this will fail.
f16005:c1:m11
def __init__(self, args, name):
super(Workflow, self).__init__(name)<EOL>self.cp = WorkflowConfigParser.from_args(args)<EOL>start_time = int(self.cp.get("<STR_LIT>", "<STR_LIT>"))<EOL>end_time = int(self.cp.get("<STR_LIT>", "<STR_LIT>"))<EOL>self.analysis_time = segments.segment([start_time, end_time])<EOL>ifos = []<EOL>for ifo in self.cp.options('<STR_LIT>'):<EOL><INDENT>ifos.append(ifo.upper())<EOL><DEDENT>self.ifos = ifos<EOL>self.ifos.sort(key=str.lower)<EOL>self.ifo_string = '<STR_LIT>'.join(self.ifos)<EOL>self._inputs = FileList([])<EOL>self._outputs = FileList([])<EOL>
Create a pycbc workflow Parameters ---------- args : argparse.ArgumentParser The command line options to initialize a CBC workflow.
f16005:c2:m0
def execute_node(self, node, verbatim_exe = False):
node.executed = True<EOL>if node.executable.needs_fetching:<EOL><INDENT>try:<EOL><INDENT>pfn = node.executable.get_pfn()<EOL><DEDENT>except:<EOL><INDENT>pfn = node.executable.get_pfn('<STR_LIT>')<EOL><DEDENT>resolved = resolve_url(pfn, permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)<EOL>node.executable.clear_pfns()<EOL>node.executable.add_pfn(urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(<EOL>resolved)), site='<STR_LIT>')<EOL><DEDENT>cmd_list = node.get_command_line()<EOL>curr_dir = os.getcwd()<EOL>out_dir = node.executable.out_dir<EOL>os.chdir(out_dir)<EOL>make_external_call(cmd_list, out_dir=os.path.join(out_dir, '<STR_LIT>'),<EOL>out_basename=node.executable.name)<EOL>os.chdir(curr_dir)<EOL>for fil in node._outputs:<EOL><INDENT>fil.node = None<EOL>fil.PFN(urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(fil.storage_path)),<EOL>site='<STR_LIT>')<EOL><DEDENT>
Execute this node immediately on the local machine
f16005:c2:m4
def save_config(self, fname, output_dir, cp=None):
cp = self.cp if cp is None else cp<EOL>ini_file_path = os.path.join(output_dir, fname)<EOL>with open(ini_file_path, "<STR_LIT:wb>") as fp:<EOL><INDENT>cp.write(fp)<EOL><DEDENT>ini_file = FileList([File(self.ifos, "<STR_LIT>",<EOL>self.analysis_time,<EOL>file_url="<STR_LIT>" + ini_file_path)])<EOL>return ini_file<EOL>
Writes configuration file to disk and returns a pycbc.workflow.File instance for the configuration file. Parameters ----------- fname : string The filename of the configuration file written to disk. output_dir : string The directory where the file is written to disk. cp : ConfigParser object The ConfigParser object to write. If None then uses self.cp. Returns ------- FileList The FileList object with the configuration file.
f16005:c2:m7
def new_output_file_opt(self, valid_seg, extension, option_name, tags=None,<EOL>store_file=None, use_tmp_subdirs=False):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>all_tags = copy.deepcopy(self.executable.tags)<EOL>for tag in tags:<EOL><INDENT>if tag not in all_tags:<EOL><INDENT>all_tags.append(tag)<EOL><DEDENT><DEDENT>store_file = store_file if store_file is not None else self.executable.retain_files<EOL>fil = File(self.executable.ifo_list, self.executable.name,<EOL>valid_seg, extension=extension, store_file=store_file,<EOL>directory=self.executable.out_dir, tags=all_tags,<EOL>use_tmp_subdirs=use_tmp_subdirs)<EOL>self.add_output_opt(option_name, fil)<EOL>return fil<EOL>
This function will create a workflow.File object corresponding to the given information and then add that file as output of this node. Parameters ----------- valid_seg : ligo.segments.segment The time span over which the job is valid for. extension : string The extension to be used at the end of the filename. E.g. '.xml' or '.sqlite'. option_name : string The option that is used when setting this job as output. For e.g. 'output-name' or 'output-file', whatever is appropriate for the current executable. tags : list of strings, (optional, default=[]) These tags will be added to the list of tags already associated with the job. They can be used to uniquely identify this output file. store_file : Boolean, (optional, default=True) This file is to be added to the output mapper and will be stored in the specified output location if True. If false file will be removed when no longer needed in the workflow.
f16005:c3:m2
def add_multiifo_input_list_opt(self, opt, inputs):
<EOL>self.add_raw_arg(opt)<EOL>self.add_raw_arg('<STR_LIT:U+0020>')<EOL>for infile in inputs:<EOL><INDENT>self.add_raw_arg(infile.ifo)<EOL>self.add_raw_arg('<STR_LIT::>')<EOL>self.add_raw_arg(infile.name)<EOL>self.add_raw_arg('<STR_LIT:U+0020>')<EOL>self._add_input(infile)<EOL><DEDENT>
Add an option that determines a list of inputs from multiple detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 .....
f16005:c3:m3
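The --opt ifo1:input1 ifo2:input2 form produced above can be sketched with plain (ifo, filename) pairs standing in for workflow File objects (the option name and filenames are hypothetical):

inputs = [('H1', 'H1-INSPIRAL.xml.gz'), ('L1', 'L1-INSPIRAL.xml.gz')]
args = ['--input-files'] + ['%s:%s' % (ifo, name) for ifo, name in inputs]
print(' '.join(args))   # --input-files H1:H1-INSPIRAL.xml.gz L1:L1-INSPIRAL.xml.gz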
def add_multiifo_output_list_opt(self, opt, outputs):
<EOL>self.add_raw_arg(opt)<EOL>self.add_raw_arg('<STR_LIT:U+0020>')<EOL>for outfile in outputs:<EOL><INDENT>self.add_raw_arg(outfile.ifo)<EOL>self.add_raw_arg('<STR_LIT::>')<EOL>self.add_raw_arg(outfile.name)<EOL>self.add_raw_arg('<STR_LIT:U+0020>')<EOL>self._add_output(outfile)<EOL><DEDENT>
Add an option that determines a list of outputs from multiple detectors. Files will be supplied as --opt ifo1:output1 ifo2:output2 .....
f16005:c3:m4
def new_multiifo_output_list_opt(self, opt, ifos, analysis_time, extension,<EOL>tags=None, store_file=None,<EOL>use_tmp_subdirs=False):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>all_tags = copy.deepcopy(self.executable.tags)<EOL>for tag in tags:<EOL><INDENT>if tag not in all_tags:<EOL><INDENT>all_tags.append(tag)<EOL><DEDENT><DEDENT>output_files = FileList([])<EOL>store_file = store_file if store_file is not None else self.executable.retain_files<EOL>for ifo in ifos:<EOL><INDENT>curr_file = File(ifo, self.executable.name, analysis_time,<EOL>extension=extension, store_file=store_file,<EOL>directory=self.executable.out_dir, tags=all_tags,<EOL>use_tmp_subdirs=use_tmp_subdirs)<EOL>output_files.append(curr_file)<EOL><DEDENT>self.add_multiifo_output_list_opt(opt, output_files)<EOL>
Add an option that determines a list of outputs from multiple detectors. Files will be supplied as --opt ifo1:output1 ifo2:output2 ..... File names are created internally from the provided extension and analysis time.
f16005:c3:m5
@property<EOL><INDENT>def output_file(self):<DEDENT>
out_files = self.output_files<EOL>if len(out_files) != <NUM_LIT:1>:<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>" %(len(out_files))<EOL>raise ValueError(err_msg)<EOL><DEDENT>return out_files[<NUM_LIT:0>]<EOL>
If only one output file return it. Otherwise raise an exception.
f16005:c3:m7
def __init__(self, ifos, exe_name, segs, file_url=None,<EOL>extension=None, directory=None, tags=None,<EOL>store_file=True, use_tmp_subdirs=False):
self.metadata = {}<EOL>if isinstance(ifos, string_types):<EOL><INDENT>self.ifo_list = [ifos]<EOL><DEDENT>else:<EOL><INDENT>self.ifo_list = ifos<EOL><DEDENT>self.ifo_string = '<STR_LIT>'.join(self.ifo_list)<EOL>self.description = exe_name<EOL>if isinstance(segs, segments.segment):<EOL><INDENT>self.segment_list = segments.segmentlist([segs])<EOL><DEDENT>elif isinstance(segs, (segments.segmentlist)):<EOL><INDENT>self.segment_list = segs<EOL><DEDENT>else:<EOL><INDENT>err = "<STR_LIT>"<EOL>err += "<STR_LIT>" %(str(type(segs)),)<EOL>raise ValueError(err)<EOL><DEDENT>if tags is not None:<EOL><INDENT>self.tags = [t.upper() for t in tags]<EOL><DEDENT>else:<EOL><INDENT>self.tags = []<EOL><DEDENT>if len(self.tags):<EOL><INDENT>self.tag_str = '<STR_LIT:_>'.join(tags)<EOL>tagged_description = '<STR_LIT:_>'.join([self.description] + tags)<EOL><DEDENT>else:<EOL><INDENT>tagged_description = self.description<EOL><DEDENT>self.ifo_string = self.ifo_string.upper()<EOL>self.tagged_description = tagged_description.upper()<EOL>if not file_url:<EOL><INDENT>if not extension:<EOL><INDENT>raise TypeError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if not directory:<EOL><INDENT>raise TypeError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>filename = self._filename(self.ifo_string, self.tagged_description,<EOL>extension, self.segment_list.extent())<EOL>path = os.path.join(directory, filename)<EOL>if not os.path.isabs(path):<EOL><INDENT>path = os.path.join(os.getcwd(), path)<EOL><DEDENT>file_url = urlparse.urlunparse(['<STR_LIT:file>', '<STR_LIT:localhost>', path, None,<EOL>None, None])<EOL><DEDENT>if use_tmp_subdirs and len(self.segment_list):<EOL><INDENT>pegasus_lfn = str(int(self.segment_list.extent()[<NUM_LIT:0>]))[:-<NUM_LIT:4>]<EOL>pegasus_lfn = pegasus_lfn + '<STR_LIT:/>' + os.path.basename(file_url)<EOL><DEDENT>else:<EOL><INDENT>pegasus_lfn = os.path.basename(file_url)<EOL><DEDENT>super(File, self).__init__(pegasus_lfn)<EOL>if store_file:<EOL><INDENT>self.storage_path = urlparse.urlsplit(file_url).path<EOL><DEDENT>else:<EOL><INDENT>self.storage_path = None<EOL><DEDENT>
Create a File instance Parameters ---------- ifos : string or list The ifo(s) that the File is valid for. If the file is independently valid for multiple ifos it can be provided as a list. Ie. ['H1','L1','V1'], if the file is only valid for the combination of ifos (for e.g. ligolw_thinca output) then this can be supplied as, for e.g. "H1L1V1". exe_name: string A short description of the executable, tagging only the program that ran this job. segs : glue.segment or glue.segmentlist The time span that the OutFile is valid for. Note that this is *not* the same as the data that the job that made the file reads in. Lalapps_inspiral jobs do not analyse the first and last 72s of the data that is read, and are therefore not valid at those times. If the time is not continuous a segmentlist can be supplied. file_url : url (optional, default=None) If this is *not* supplied, extension and directory must be given. If specified this explicitly points to the url of the file, or the url where the file will be generated when made in the workflow. extension : string (optional, default=None) Either supply this *and* directory *or* supply only file_url. If given this gives the extension at the end of the file name. The full file name will be inferred from the other arguments following the workflow standard. directory : string (optional, default=None) Either supply this *and* extension *or* supply only file_url. If given this gives the directory in which the file exists, or will exist. The file name will be inferred from the other arguments following the workflow standard. tags : list of strings (optional, default=None) This is a list of descriptors describing what this file is. For e.g. this might be ["BNSINJECTIONS", "LOWMASS", "CAT_2_VETO"]. These are used in file naming.
f16005:c4:m0
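A minimal sketch of constructing a File directly, assuming the class and ligo.segments are importable as shown; the import location, names, times and paths are illustrative assumptions:

from ligo import segments
from pycbc.workflow import File   # assumed import location

valid = segments.segment(1000000000, 1000004096)
curr_file = File('H1', 'inspiral', valid,
                 extension='.xml.gz',        # with directory, the file name is
                 directory='output',         # built by the workflow convention
                 tags=['BNSINJECTIONS'])
# The basename follows IFO-DESCRIPTION_TAGS-START-DURATION.extension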
def __getstate__(self):
for i, seg in enumerate(self.segment_list):<EOL><INDENT>self.segment_list[i] = segments.segment(float(seg[<NUM_LIT:0>]), float(seg[<NUM_LIT:1>]))<EOL><DEDENT>self.cache_entry = None<EOL>safe_dict = copy.copy(self.__dict__)<EOL>safe_dict['<STR_LIT>'] = None<EOL>return safe_dict<EOL>
Allow the ahope file to be picklable. This disables the usage of the internal cache entry.
f16005:c4:m1
def add_metadata(self, key, value):
self.metadata[key] = value<EOL>
Add arbitrary metadata to this file
f16005:c4:m2
@property<EOL><INDENT>def ifo(self):<DEDENT>
if len(self.ifo_list) == <NUM_LIT:1>:<EOL><INDENT>return self.ifo_list[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>err = "<STR_LIT>"<EOL>err += "<STR_LIT>" %(str(self.ifo_list),)<EOL>raise TypeError(err)<EOL><DEDENT>
If only one ifo in the ifo_list this will be that ifo. Otherwise an error is raised.
f16005:c4:m3
@property<EOL><INDENT>def segment(self):<DEDENT>
if len(self.segment_list) == <NUM_LIT:1>:<EOL><INDENT>return self.segment_list[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>err = "<STR_LIT>"<EOL>err += "<STR_LIT>" %(str(self.segment_list),)<EOL>raise TypeError(err)<EOL><DEDENT>
If only one segment in the segmentlist this will be that segment. Otherwise an error is raised.
f16005:c4:m4
@property<EOL><INDENT>def cache_entry(self):<DEDENT>
if self.storage_path is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>file_url = urlparse.urlunparse(['<STR_LIT:file>', '<STR_LIT:localhost>', self.storage_path, None,<EOL>None, None])<EOL>cache_entry = lal.utils.CacheEntry(self.ifo_string,<EOL>self.tagged_description, self.segment_list.extent(), file_url)<EOL>cache_entry.workflow_file = self<EOL>return cache_entry<EOL>
Returns a CacheEntry instance for File.
f16005:c4:m5
def _filename(self, ifo, description, extension, segment):
if extension.startswith('<STR_LIT:.>'):<EOL><INDENT>extension = extension[<NUM_LIT:1>:]<EOL><DEDENT>start = int(segment[<NUM_LIT:0>])<EOL>end = int(math.ceil(segment[<NUM_LIT:1>]))<EOL>duration = str(end-start)<EOL>start = str(start)<EOL>return "<STR_LIT>" % (ifo, description.upper(), start,<EOL>duration, extension)<EOL>
Construct the standard output filename. Should only be used internally of the File class.
f16005:c4:m6
def categorize_by_attr(self, attribute):
<EOL>flist = sorted(self, key=attrgetter(attribute), reverse=True)<EOL>groups = []<EOL>keys = []<EOL>for k, g in groupby(flist, attrgetter(attribute)):<EOL><INDENT>groups.append(FileList(g))<EOL>keys.append(k)<EOL><DEDENT>return keys, groups<EOL>
Function to categorize a FileList by a File object attribute (eg. 'segment', 'ifo', 'description'). Parameters ----------- attribute : string File object attribute to categorize FileList Returns -------- keys : list A list of values for an attribute groups : list A list of FileLists
f16005:c5:m0
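An illustrative grouping with categorize_by_attr, assuming file_list is a populated FileList whose entries are each valid for a single ifo; any File attribute ('ifo', 'segment', 'description', ...) can be used:

# Group files by detector: keys holds the distinct ifo strings and groups
# holds one FileList per key, in matching order.
keys, groups = file_list.categorize_by_attr('ifo')
for ifo, sub_list in zip(keys, groups):
    print(ifo, len(sub_list))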
def find_output(self, ifo, time):
<EOL>try:<EOL><INDENT>lenTime = len(time)<EOL><DEDENT>except TypeError:<EOL><INDENT>outFile = self.find_output_at_time(ifo,time)<EOL><DEDENT>else:<EOL><INDENT>if lenTime == <NUM_LIT:2>:<EOL><INDENT>outFile = self.find_output_in_range(ifo,time[<NUM_LIT:0>],time[<NUM_LIT:1>])<EOL><DEDENT>if len(time) != <NUM_LIT:2>:<EOL><INDENT>raise TypeError("<STR_LIT>")<EOL><DEDENT><DEDENT>return outFile<EOL>
Returns one File most appropriate at the given time/time range. Return one File that covers the given time, or is most appropriate for the supplied time range. Parameters ----------- ifo : string Name of the ifo (or ifos) that the file should be valid for. time : int/float/LIGOGPStime or tuple containing two values If int/float/LIGOGPStime (or similar way of specifying one time) is given, return the File corresponding to the time. This calls self.find_output_at_time(ifo,time). If a tuple of two values is given, return the File that is **most appropriate** for the time range given. This calls self.find_output_in_range Returns -------- pycbc_file : pycbc.workflow.File instance The File that corresponds to the time or time range
f16005:c5:m1
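A sketch of the two calling modes of find_output described above; file_list and the GPS times are placeholders:

# A single time delegates to find_output_at_time
f_at_time = file_list.find_output('H1', 1000002048)

# A two-element range delegates to find_output_in_range and returns the
# file whose coverage overlaps the range the most
f_in_range = file_list.find_output('H1', (1000000000, 1000004096))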
def find_output_at_time(self, ifo, time):
<EOL>outFiles = [i for i in self if ifo in i.ifo_list and time in i.segment_list]<EOL>if len(outFiles) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>elif len(outFiles) == <NUM_LIT:1>:<EOL><INDENT>return outFiles<EOL><DEDENT>else:<EOL><INDENT>return outFiles<EOL><DEDENT>
Return File that covers the given time. Parameters ----------- ifo : string Name of the ifo (or ifos) that the File should correspond to time : int/float/LIGOGPStime Return the Files that cover the supplied time. If no File covers the time this will return None. Returns -------- list of File classes The Files that correspond to the time.
f16005:c5:m2
def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
currsegment_list = segments.segmentlist([current_segment])<EOL>overlap_files = self.find_all_output_in_range(ifo, current_segment,<EOL>useSplitLists=useSplitLists)<EOL>overlap_windows = [abs(i.segment_list & currsegment_list) for i in overlap_files]<EOL>if not overlap_windows:<EOL><INDENT>return []<EOL><DEDENT>overlap_windows = numpy.array(overlap_windows, dtype = int)<EOL>segmentLst = overlap_files[overlap_windows.argmax()].segment_list<EOL>output_files = [f for f in overlap_files if f.segment_list==segmentLst]<EOL>return output_files<EOL>
Return the list of Files that is most appropriate for the supplied time range. That is, the Files whose coverage time has the largest overlap with the supplied time range. Parameters ----------- ifo : string Name of the ifo (or ifos) that the File should correspond to current_segment : glue.segment.segment The segment of time that files must intersect. Returns -------- FileList class The list of Files that are most appropriate for the time range
f16005:c5:m3
def find_output_in_range(self, ifo, start, end):
currsegment_list = segments.segmentlist([segments.segment(start, end)])<EOL>outFiles = [i for i in self if ifo in i.ifo_list]<EOL>if len(outFiles) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>currSeg = segments.segment([start,end])<EOL>outFiles = [i for i in outFiles if i.segment_list.intersects_segment(currSeg)]<EOL>if len(outFiles) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>elif len(outFiles) == <NUM_LIT:1>:<EOL><INDENT>return outFiles[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>overlap_windows = [abs(i.segment_list & currsegment_list) for i in outFiles]<EOL>overlap_windows = numpy.array(overlap_windows, dtype = int)<EOL>return outFiles[overlap_windows.argmax()]<EOL><DEDENT>
Return the File that is most appropriate for the supplied time range. That is, the File whose coverage time has the largest overlap with the supplied time range. If no Files overlap the supplied time window, will return None. Parameters ----------- ifo : string Name of the ifo (or ifos) that the File should correspond to start : int/float/LIGOGPStime The start of the time range of interest. end : int/float/LIGOGPStime The end of the time range of interest Returns -------- File class The File that is most appropriate for the time range
f16005:c5:m4
def find_all_output_in_range(self, ifo, currSeg, useSplitLists=False):
if not useSplitLists:<EOL><INDENT>outFiles = [i for i in self if ifo in i.ifo_list]<EOL>outFiles = [i for i in outFiles if i.segment_list.intersects_segment(currSeg)]<EOL><DEDENT>else:<EOL><INDENT>if not self._check_split_list_validity():<EOL><INDENT>self._temporal_split_list(<NUM_LIT:100>)<EOL><DEDENT>startIdx = int( (currSeg[<NUM_LIT:0>] - self._splitListsStart) / self._splitListsStep )<EOL>endIdx = (currSeg[<NUM_LIT:1>] - self._splitListsStart) / self._splitListsStep<EOL>endIdx = int(endIdx - <NUM_LIT>)<EOL>outFiles = []<EOL>for idx in range(startIdx, endIdx + <NUM_LIT:1>):<EOL><INDENT>if idx < <NUM_LIT:0> or idx >= self._splitListsNum:<EOL><INDENT>continue<EOL><DEDENT>outFilesTemp = [i for i in self._splitLists[idx] if ifo in i.ifo_list]<EOL>outFiles.extend([i for i in outFilesTemp if i.segment_list.intersects_segment(currSeg)])<EOL>outFiles = list(set(outFiles))<EOL><DEDENT><DEDENT>return self.__class__(outFiles)<EOL>
Return all files that overlap the specified segment.
f16005:c5:m5
def find_output_with_tag(self, tag):
<EOL>tag = tag.upper()<EOL>return FileList([i for i in self if tag in i.tags])<EOL>
Find all files that have tag in self.tags
f16005:c5:m6
def find_output_without_tag(self, tag):
<EOL>tag = tag.upper()<EOL>return FileList([i for i in self if tag not in i.tags])<EOL>
Find all files that do not have tag in self.tags
f16005:c5:m7
def find_output_with_ifo(self, ifo):
<EOL>ifo = ifo.upper()<EOL>return FileList([i for i in self if ifo in i.ifo_list])<EOL>
Find all files that have the given ifo in self.ifo_list
f16005:c5:m8
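The tag and ifo filters above each return a new FileList, so they can be chained; a sketch assuming file_list exists and the tag names are illustrative:

h1_files = file_list.find_output_with_ifo('H1')
vetoed = h1_files.find_output_with_tag('CAT_2_VETO')
no_inj = vetoed.find_output_without_tag('INJECTIONS')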
def get_times_covered_by_files(self):
times = segments.segmentlist([])<EOL>for entry in self:<EOL><INDENT>times.extend(entry.segment_list)<EOL><DEDENT>times.coalesce()<EOL>return times<EOL>
Find the coalesced union of the segments of all files in the list, i.e. all times covered by at least one file.
f16005:c5:m9
def convert_to_lal_cache(self):
lal_cache = gluelal.Cache([])<EOL>for entry in self:<EOL><INDENT>try:<EOL><INDENT>lal_cache.append(entry.cache_entry)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return lal_cache<EOL>
Return all files in this object as a glue.lal.Cache object
f16005:c5:m10
def _temporal_split_list(self,numSubLists):
<EOL>startTime = float( min([i.segment_list[<NUM_LIT:0>][<NUM_LIT:0>] for i in self]))<EOL>endTime = float( max([i.segment_list[-<NUM_LIT:1>][-<NUM_LIT:1>] for i in self]))<EOL>step = (endTime - startTime) / float(numSubLists)<EOL>self._splitLists = []<EOL>for idx in range(numSubLists):<EOL><INDENT>self._splitLists.append(FileList([]))<EOL><DEDENT>for currFile in self:<EOL><INDENT>segExtent = currFile.segment_list.extent()<EOL>startIdx = (segExtent[<NUM_LIT:0>] - startTime) / step<EOL>endIdx = (segExtent[<NUM_LIT:1>] - startTime) / step<EOL>startIdx = int(startIdx - <NUM_LIT>)<EOL>endIdx = int(endIdx + <NUM_LIT>)<EOL>if startIdx < <NUM_LIT:0>:<EOL><INDENT>startIdx = <NUM_LIT:0><EOL><DEDENT>if endIdx >= numSubLists:<EOL><INDENT>endIdx = numSubLists - <NUM_LIT:1><EOL><DEDENT>for idx in range(startIdx, endIdx + <NUM_LIT:1>):<EOL><INDENT>self._splitLists[idx].append(currFile)<EOL><DEDENT><DEDENT>self._splitListsLength = len(self)<EOL>self._splitListsNum = numSubLists<EOL>self._splitListsStart = startTime<EOL>self._splitListsEnd = endTime<EOL>self._splitListsStep = step<EOL>self._splitListsSet = True<EOL>
This internal function is used to speed the code up in cases where a number of operations are being made to determine if files overlap a specific time. Normally such operations are done on *all* entries with *every* call. However, if we predetermine which files are at which times, we can avoid testing *every* file every time. We therefore create numSubLists distinct and equal length time windows equally spaced from the first time entry in the list until the last. A list is made for each window and files are added to the lists of every window they overlap. If the FileList changes, the split lists become invalid and must be regenerated; currently the check for this is fairly basic.
f16005:c5:m11
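The bucketing described above maps a file's time extent onto sub-list indices by linear scaling between the list's start and end times. A standalone sketch of that index arithmetic follows (not the class method itself; the tolerance value is an assumption, since the literal is masked in the source above):

num_sub_lists = 100
start_time, end_time = 1000000000.0, 1001000000.0
step = (end_time - start_time) / num_sub_lists
tol = 0.001   # assumed small tolerance; exact literal is masked in the source

seg_start, seg_end = 1000123456.0, 1000125000.0
start_idx = max(int((seg_start - start_time) / step - tol), 0)
end_idx = min(int((seg_end - start_time) / step + tol), num_sub_lists - 1)
# The file would be appended to every sub-list from start_idx to end_idx,
# so later range queries only need to inspect the overlapping buckets.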
def _check_split_list_validity(self):
<EOL>if not (hasattr(self,"<STR_LIT>") and (self._splitListsSet)):<EOL><INDENT>return False<EOL><DEDENT>elif len(self) != self._splitListsLength:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>
See _temporal_split_list above. This function checks if the current split lists are still valid.
f16005:c5:m12
@classmethod<EOL><INDENT>def load(cls, filename):<DEDENT>
f = open(filename, '<STR_LIT:r>')<EOL>return cPickle.load(f)<EOL>
Load an AhopeFileList from a pickle file
f16005:c5:m13
def dump(self, filename):
f = open(filename, '<STR_LIT:w>')<EOL>cPickle.dump(self, f)<EOL>
Output this AhopeFileList to a pickle file
f16005:c5:m14
def to_file_object(self, name, out_dir):
make_analysis_dir(out_dir)<EOL>file_ref = File('<STR_LIT>', name, self.get_times_covered_by_files(),<EOL>extension='<STR_LIT>', directory=out_dir)<EOL>self.dump(file_ref.storage_path)<EOL>return file_ref<EOL>
Dump to a pickle file and return a File object reference of this list Parameters ---------- name : str An identifier of this file. Needs to be unique. out_dir : path Path in which to place this file Returns ------- file : File
f16005:c5:m15
def __init__(self, ifo_list, description, valid_segment,<EOL>segment_dict=None, seg_summ_dict=None, **kwargs):
super(SegFile, self).__init__(ifo_list, description, valid_segment,<EOL>**kwargs)<EOL>self.valid_segments = self.segment_list<EOL>self.segment_dict = segment_dict<EOL>self.seg_summ_dict = seg_summ_dict<EOL>
See File.__init__ for a full set of documentation for how to call this class. The only thing unique and added to this class is the optional segment_dict. NOTE that while segment_dict is a ligo.segments.segmentlistdict, rather than keying by dict[ifo] as usual we key by dict[ifo:name]. Parameters ------------ ifo_list : string or list (required) See File.__init__ description : string (required) See File.__init__ valid_segment : ligo.segments.segment or ligo.segments.segmentlist See File.__init__ segment_dict : ligo.segments.segmentlistdict (optional, default=None) A ligo.segments.segmentlistdict covering the times covered by the segmentlistdict associated with this file. Can be added by setting self.segment_dict after initializing an instance of the class.
f16005:c6:m0
@classmethod<EOL><INDENT>def from_segment_list(cls, description, segmentlist, name, ifo,<EOL>seg_summ_list=None, **kwargs):<DEDENT>
seglistdict = segments.segmentlistdict()<EOL>seglistdict[ifo + '<STR_LIT::>' + name] = segmentlist<EOL>if seg_summ_list is not None:<EOL><INDENT>seg_summ_dict = segments.segmentlistdict()<EOL>seg_summ_dict[ifo + '<STR_LIT::>' + name] = seg_summ_list<EOL><DEDENT>else:<EOL><INDENT>seg_summ_dict = None<EOL><DEDENT>return cls.from_segment_list_dict(description, seglistdict,<EOL>seg_summ_dict=seg_summ_dict, **kwargs)<EOL>
Initialize a SegFile object from a segmentlist. Parameters ------------ description : string (required) See File.__init__ segmentlist : ligo.segments.segmentlist The segment list that will be stored in this file. name : str The name of the segment list to be stored in the file. ifo : str The ifo of the segment list to be stored in this file. seg_summ_list : ligo.segments.segmentlist (OPTIONAL) Specify the segment_summary segmentlist that goes along with the segmentlist. Default=None, in this case segment_summary is taken from the valid_segment of the SegFile class.
f16005:c6:m1
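A sketch of building a SegFile from a single segment list, assuming SegFile is importable from pycbc.workflow, the target directory exists, and the extension/directory keywords are forwarded through to File.__init__ as documented above; all names and times are illustrative:

from ligo import segments
from pycbc.workflow import SegFile   # assumed import location

sci = segments.segmentlist([segments.segment(1000000000, 1000002000),
                            segments.segment(1000003000, 1000004000)])
seg_file = SegFile.from_segment_list('SCIENCE_SEGMENTS', sci,
                                     'SCIENCE', 'H1',
                                     extension='.xml',
                                     directory='segments')
# Unless file_exists=True is passed, the segments are written to disk via
# to_segment_xml() as part of construction.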
@classmethod<EOL><INDENT>def from_multi_segment_list(cls, description, segmentlists, names, ifos,<EOL>seg_summ_lists=None, **kwargs):<DEDENT>
seglistdict = segments.segmentlistdict()<EOL>for name, ifo, segmentlist in zip(names, ifos, segmentlists):<EOL><INDENT>seglistdict[ifo + '<STR_LIT::>' + name] = segmentlist<EOL><DEDENT>if seg_summ_lists is not None:<EOL><INDENT>seg_summ_dict = segments.segmentlistdict()<EOL>for name, ifo, seg_summ_list in zip(names, ifos, seg_summ_lists):<EOL><INDENT>seg_summ_dict[ifo + '<STR_LIT::>' + name] = seg_summ_list<EOL><DEDENT><DEDENT>else:<EOL><INDENT>seg_summ_dict = None<EOL><DEDENT>return cls.from_segment_list_dict(description, seglistdict,<EOL>seg_summ_dict=seg_summ_dict, **kwargs)<EOL>
Initialize a SegFile object from a list of segmentlists. Parameters ------------ description : string (required) See File.__init__ segmentlists : List of ligo.segments.segmentlist List of segment lists that will be stored in this file. names : List of str List of names of the segment lists to be stored in the file. ifos : List of str List of ifos of the segment lists to be stored in this file. seg_summ_lists : List of ligo.segments.segmentlist (OPTIONAL) Specify the segment_summary segmentlists that go along with the segmentlists. Default=None, in this case segment_summary is taken from the valid_segment of the SegFile class.
f16005:c6:m2
@classmethod<EOL><INDENT>def from_segment_list_dict(cls, description, segmentlistdict,<EOL>ifo_list=None, valid_segment=None,<EOL>file_exists=False, seg_summ_dict=None,<EOL>**kwargs):<DEDENT>
if ifo_list is None:<EOL><INDENT>ifo_set = set([i.split('<STR_LIT::>')[<NUM_LIT:0>] for i in segmentlistdict.keys()])<EOL>ifo_list = list(ifo_set)<EOL>ifo_list.sort()<EOL><DEDENT>if valid_segment is None:<EOL><INDENT>if seg_summ_dict and numpy.any([len(v) for _, v in seg_summ_dict.items()]):<EOL><INDENT>valid_segment = seg_summ_dict.extent_all()<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>valid_segment = segmentlistdict.extent_all()<EOL><DEDENT>except:<EOL><INDENT>segmentlistdict = segments.segmentlistdict(segmentlistdict)<EOL>try:<EOL><INDENT>valid_segment = segmentlistdict.extent_all()<EOL><DEDENT>except ValueError:<EOL><INDENT>warn_msg = "<STR_LIT>"<EOL>warn_msg += "<STR_LIT>"<EOL>logging.warn(warn_msg)<EOL>valid_segment = segments.segment([<NUM_LIT:0>,<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>instnc = cls(ifo_list, description, valid_segment,<EOL>segment_dict=segmentlistdict, seg_summ_dict=seg_summ_dict,<EOL>**kwargs)<EOL>if not file_exists:<EOL><INDENT>instnc.to_segment_xml()<EOL><DEDENT>else:<EOL><INDENT>instnc.PFN(urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(<EOL>instnc.storage_path)), site='<STR_LIT>')<EOL><DEDENT>return instnc<EOL>
Initialize a SegFile object from a segmentlistdict. Parameters ------------ description : string (required) See File.__init__ segmentlistdict : ligo.segments.segmentlistdict See SegFile.__init__ ifo_list : string or list (optional) See File.__init__, if not given a list of all ifos in the segmentlistdict object will be used valid_segment : ligo.segments.segment or ligo.segments.segmentlist See File.__init__, if not given the extent of all segments in the segmentlistdict is used. file_exists : boolean (default = False) If provided and set to True it is assumed that this file already exists on disk and so there is no need to write again. seg_summ_dict : ligo.segments.segmentlistdict Optional. See SegFile.__init__.
f16005:c6:m3
@classmethod<EOL><INDENT>def from_segment_xml(cls, xml_file, **kwargs):<DEDENT>
<EOL>fp = open(xml_file, '<STR_LIT:r>')<EOL>xmldoc, _ = ligolw_utils.load_fileobj(fp,<EOL>gz=xml_file.endswith("<STR_LIT>"),<EOL>contenthandler=ContentHandler)<EOL>seg_def_table = table.get_table(xmldoc,<EOL>lsctables.SegmentDefTable.tableName)<EOL>seg_table = table.get_table(xmldoc, lsctables.SegmentTable.tableName)<EOL>seg_sum_table = table.get_table(xmldoc,<EOL>lsctables.SegmentSumTable.tableName)<EOL>segs = segments.segmentlistdict()<EOL>seg_summ = segments.segmentlistdict()<EOL>seg_id = {}<EOL>for seg_def in seg_def_table:<EOL><INDENT>full_channel_name = '<STR_LIT::>'.join([str(seg_def.ifos),<EOL>str(seg_def.name)])<EOL>seg_id[int(seg_def.segment_def_id)] = full_channel_name<EOL>segs[full_channel_name] = segments.segmentlist()<EOL>seg_summ[full_channel_name] = segments.segmentlist()<EOL><DEDENT>for seg in seg_table:<EOL><INDENT>seg_obj = segments.segment(<EOL>lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),<EOL>lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))<EOL>segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)<EOL><DEDENT>for seg in seg_sum_table:<EOL><INDENT>seg_obj = segments.segment(<EOL>lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),<EOL>lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))<EOL>seg_summ[seg_id[int(seg.segment_def_id)]].append(seg_obj)<EOL><DEDENT>for seg_name in seg_id.values():<EOL><INDENT>segs[seg_name] = segs[seg_name].coalesce()<EOL><DEDENT>xmldoc.unlink()<EOL>fp.close()<EOL>curr_url = urlparse.urlunparse(['<STR_LIT:file>', '<STR_LIT:localhost>', xml_file, None,<EOL>None, None])<EOL>return cls.from_segment_list_dict('<STR_LIT>', segs, file_url=curr_url,<EOL>file_exists=True,<EOL>seg_summ_dict=seg_summ, **kwargs)<EOL>
Read segment and segment_summary lists from an xml segment file and return them as a SegFile. Parameters ----------- xml_file : str Path to the segment xml file
f16005:c6:m4
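A sketch of reading segments back from an existing XML segment file; the path and the ifo:name key are illustrative placeholders, not values from the source:

seg_file = SegFile.from_segment_xml('H1-SCIENCE_SEGMENTS-1000000000-4000.xml')
science = seg_file.segment_dict['H1:SCIENCE']   # keyed by ifo:name
print(abs(science))                             # total time in the list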
def remove_short_sci_segs(self, minSegLength):
newsegment_list = segments.segmentlist()<EOL>for key, seglist in self.segment_dict.items():<EOL><INDENT>newsegment_list = segments.segmentlist()<EOL>for seg in seglist:<EOL><INDENT>if abs(seg) > minSegLength:<EOL><INDENT>newsegment_list.append(seg)<EOL><DEDENT><DEDENT>newsegment_list.coalesce()<EOL>self.segment_dict[key] = newsegment_list<EOL><DEDENT>self.to_segment_xml(override_file_if_exists=True)<EOL>
Function to remove all science segments shorter than a specific length. Also updates the file on disk to remove these segments. Parameters ----------- minSegLength : int Minimum length of science segments. Segments shorter than this will be removed.
f16005:c6:m5
def parse_segdict_key(self, key):
splt = key.split('<STR_LIT::>')<EOL>if len(splt) == <NUM_LIT:2>:<EOL><INDENT>return splt[<NUM_LIT:0>], splt[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>err_msg = "<STR_LIT>" %(key,)<EOL>raise ValueError(err_msg)<EOL><DEDENT>
Return ifo and name from the segdict key.
f16005:c6:m7
def to_segment_xml(self, override_file_if_exists=False):
<EOL>outdoc = ligolw.Document()<EOL>outdoc.appendChild(ligolw.LIGO_LW())<EOL>process = ligolw_process.register_to_xmldoc(outdoc, sys.argv[<NUM_LIT:0>], {})<EOL>for key, seglist in self.segment_dict.items():<EOL><INDENT>ifo, name = self.parse_segdict_key(key)<EOL>fsegs = [(lal.LIGOTimeGPS(seg[<NUM_LIT:0>]),<EOL>lal.LIGOTimeGPS(seg[<NUM_LIT:1>])) for seg in seglist]<EOL>if self.seg_summ_dict is None:<EOL><INDENT>vsegs = [(lal.LIGOTimeGPS(seg[<NUM_LIT:0>]),<EOL>lal.LIGOTimeGPS(seg[<NUM_LIT:1>])) for seg in self.valid_segments]<EOL><DEDENT>else:<EOL><INDENT>vsegs = [(lal.LIGOTimeGPS(seg[<NUM_LIT:0>]),<EOL>lal.LIGOTimeGPS(seg[<NUM_LIT:1>])) for seg in self.seg_summ_dict[key]]<EOL><DEDENT>with ligolw_segments.LigolwSegments(outdoc, process) as x:<EOL><INDENT>x.add(ligolw_segments.LigolwSegmentList(active=fsegs,<EOL>instruments=set([ifo]), name=name,<EOL>version=<NUM_LIT:1>, valid=vsegs))<EOL><DEDENT><DEDENT>url = urlparse.urljoin('<STR_LIT>', urllib.pathname2url(self.storage_path))<EOL>if not override_file_if_exists or not self.has_pfn(url, site='<STR_LIT>'):<EOL><INDENT>self.PFN(url, site='<STR_LIT>')<EOL><DEDENT>ligolw_utils.write_filename(outdoc, self.storage_path)<EOL>
Write the segment lists in self.segment_dict to self.storage_path.
f16005:c6:m8
def set_memory(self, size):
self.add_profile('<STR_LIT>', '<STR_LIT>', '<STR_LIT>' % size)<EOL>
Set the amount of memory that is required in megabytes
f16006:c0:m0
def set_storage(self, size):
self.add_profile('<STR_LIT>', '<STR_LIT>', '<STR_LIT>' % size)<EOL>
Set the amount of storage required in megabytes
f16006:c0:m1
def add_profile(self, namespace, key, value, force=False):
try:<EOL><INDENT>entry = dax.Profile(namespace, key, value)<EOL>self._dax_executable.addProfile(entry)<EOL><DEDENT>except dax.DuplicateError:<EOL><INDENT>if force:<EOL><INDENT>self._dax_executable.removeProfile(entry)<EOL>self._dax_executable.addProfile(entry)<EOL><DEDENT><DEDENT>
Add profile information to this executable
f16006:c1:m5
def add_arg(self, arg):
if not isinstance(arg, File):<EOL><INDENT>arg = str(arg)<EOL><DEDENT>self._args += [arg]<EOL>
Add an argument
f16006:c2:m1
def add_raw_arg(self, arg):
if not isinstance(arg, File):<EOL><INDENT>arg = str(arg)<EOL><DEDENT>self._raw_options += [arg]<EOL>
Add an argument to the command line of this job, but do *NOT* add white space between arguments. This can be added manually by adding ' ' if needed
f16006:c2:m2
def add_opt(self, opt, value=None):
if value is not None:<EOL><INDENT>if not isinstance(value, File):<EOL><INDENT>value = str(value)<EOL><DEDENT>self._options += [opt, value]<EOL><DEDENT>else:<EOL><INDENT>self._options += [opt]<EOL><DEDENT>
Add an option
f16006:c2:m3
def _add_input(self, inp):
self._inputs += [inp]<EOL>inp._set_as_input_of(self)<EOL>
Add as source of input data
f16006:c2:m4
def _add_output(self, out):
self._outputs += [out]<EOL>out.node = self<EOL>out._set_as_output_of(self)<EOL>
Add as destination of output data
f16006:c2:m5
def add_input_opt(self, opt, inp):
self.add_opt(opt, inp._dax_repr())<EOL>self._add_input(inp)<EOL>
Add an option that determines an input
f16006:c2:m6
def add_output_opt(self, opt, out):
self.add_opt(opt, out._dax_repr())<EOL>self._add_output(out)<EOL>
Add an option that determines an output
f16006:c2:m7
def add_output_list_opt(self, opt, outputs):
self.add_opt(opt)<EOL>for out in outputs:<EOL><INDENT>self.add_opt(out)<EOL>self._add_output(out)<EOL><DEDENT>
Add an option that determines a list of outputs
f16006:c2:m8
def add_input_list_opt(self, opt, inputs):
self.add_opt(opt)<EOL>for inp in inputs:<EOL><INDENT>self.add_opt(inp)<EOL>self._add_input(inp)<EOL><DEDENT>
Add an option that determines a list of inputs
f16006:c2:m9
def add_list_opt(self, opt, values):
self.add_opt(opt)<EOL>for val in values:<EOL><INDENT>self.add_opt(val)<EOL><DEDENT>
Add an option with a list of non-file parameters.
f16006:c2:m10
def add_input_arg(self, inp):
self.add_arg(inp._dax_repr())<EOL>self._add_input(inp)<EOL>
Add an input as an argument
f16006:c2:m11
def add_output_arg(self, out):
self.add_arg(out._dax_repr())<EOL>self._add_output(out)<EOL>
Add an output as an argument
f16006:c2:m12
def new_output_file_opt(self, opt, name):
fil = File(name)<EOL>self.add_output_opt(opt, fil)<EOL>return fil<EOL>
Add an option and return a new file handle
f16006:c2:m13
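A sketch of how the Node option/argument helpers above compose when building a job's command line; node, bank_file and the option names are placeholders, not values from the source:

node.add_opt('--approximant', 'SEOBNRv4')        # plain option/value pair
node.add_input_opt('--bank-file', bank_file)     # also records bank_file as an input
out = node.new_output_file_opt('--output-file', 'triggers.hdf')
# 'out' is registered as an output of this node, so a later node that takes
# it via add_input_opt acquires a data dependency on this one.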
def add_profile(self, namespace, key, value, force=False):
try:<EOL><INDENT>entry = dax.Profile(namespace, key, value)<EOL>self._dax_node.addProfile(entry)<EOL><DEDENT>except dax.DuplicateError:<EOL><INDENT>if force:<EOL><INDENT>self._dax_node.removeProfile(entry)<EOL>self._dax_node.addProfile(entry)<EOL><DEDENT><DEDENT>
Add profile information to this node at the DAX level
f16006:c2:m14
def add_workflow(self, workflow):
workflow.in_workflow = self<EOL>self.sub_workflows += [workflow]<EOL>node = workflow.as_job<EOL>self._adag.addJob(node)<EOL>node.file.PFN(os.path.join(os.getcwd(), node.file.name), site='<STR_LIT>')<EOL>self._adag.addFile(node.file)<EOL>for inp in self._external_workflow_inputs:<EOL><INDENT>workflow._make_root_dependency(inp.node)<EOL><DEDENT>return self<EOL>
Add a sub-workflow to this workflow This function adds a sub-workflow of Workflow class to this workflow. Parent-child relationships are determined by data dependencies. Parameters ---------- workflow : Workflow instance The sub-workflow to add to this one
f16006:c3:m2
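A brief sketch of nesting workflows with add_workflow, assuming outer and inner are Workflow instances from this module:

# The sub-workflow becomes a job of the outer workflow; ordering between the
# two is then driven by the data dependencies of their nodes.
outer.add_workflow(inner)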