def add_node(self, node):
    node._finalize()
    node.in_workflow = self
    self._adag.addJob(node._dax_node)
    added_nodes = []
    for inp in node._inputs:
        if inp.node is not None and inp.node.in_workflow == self:
            if inp.node not in added_nodes:
                parent = inp.node._dax_node
                child = node._dax_node
                dep = dax.Dependency(parent=parent, child=child)
                self._adag.addDependency(dep)
                added_nodes.append(inp.node)
        elif inp.node is not None and not inp.node.in_workflow:
            raise ValueError('<STR_LIT>'
                             '<STR_LIT>')
        elif inp.node is None and not inp.workflow_input:
            self._inputs += [inp]
            inp.workflow_input = True
        elif inp.node is not None and inp.node.in_workflow != self and inp not in self._inputs:
            self._inputs += [inp]
            self._external_workflow_inputs += [inp]
    self._outputs += node._outputs
    if not node.executable.in_workflow:
        node.executable.in_workflow = True
        self._executables += [node.executable]
    return self
Add a node to this workflow.

This function adds nodes to the workflow. It also determines parent/child
relations from the DataStorage inputs to this job.

Parameters
----------
node : pycbc.workflow.pegasus_workflow.Node
    A node that should be executed as part of this workflow.
f16006:c3:m3
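The dependency bookkeeping above reduces to a simple rule: an input produced by a node already in this workflow creates one parent-to-child edge (deduplicated per parent), while an input with no producing node becomes a workflow-level input. A minimal, self-contained sketch of that rule, using hypothetical Toy* classes rather than the pycbc API:

class ToyFile(object):
    def __init__(self, producer=None):
        self.producer = producer          # node that creates this file, if any

class ToyNode(object):
    def __init__(self, inputs):
        self.inputs = inputs
        self.workflow = None

class ToyWorkflow(object):
    def __init__(self):
        self.edges = []                   # (parent, child) dependency pairs
        self.inputs = []                  # files the workflow must be handed

    def add_node(self, node):
        seen = []
        for inp in node.inputs:
            if inp.producer is not None and inp.producer.workflow is self:
                if inp.producer not in seen:
                    self.edges.append((inp.producer, node))
                    seen.append(inp.producer)
            elif inp.producer is None:
                self.inputs.append(inp)   # pre-existing input file
        node.workflow = self

wf = ToyWorkflow()
a = ToyNode([ToyFile()])                  # reads a pre-existing file
wf.add_node(a)
b = ToyNode([ToyFile(producer=a)])        # consumes a's output
wf.add_node(b)
assert wf.edges == [(a, b)] and len(wf.inputs) == 1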
def save(self, filename=None, tc=None):
    if filename is None:
        filename = self.filename
    for sub in self.sub_workflows:
        sub.save()
    if tc is None:
        tc = '<STR_LIT>'.format(filename)
    p = os.path.dirname(tc)
    f = os.path.basename(tc)
    if not p:
        p = '.'
    tc = TransformationCatalog(p, f)
    for e in self._adag.executables.copy():
        tc.add(e)
        try:
            tc.add_container(e.container)
        except:
            pass
        self._adag.removeExecutable(e)
    f = open(filename, "w")
    self._adag.writeXML(f)
    tc.write()
Write this workflow to a DAX file.
f16006:c3:m5
@property
def dax_repr(self):
    return self._dax_repr()
Return the DAX representation of a File.
f16006:c5:m2
def has_pfn(self, url, site=None):
    curr_pfn = dax.PFN(url, site)
    return self.hasPFN(curr_pfn)
Wrapper of the Pegasus hasPFN function that allows it to be called outside
of specific Pegasus functions.
f16006:c5:m6
@classmethod
def from_path(cls, path):
    urlparts = urlparse.urlsplit(path)
    site = '<STR_LIT>'
    if (urlparts.scheme == '<STR_LIT>' or urlparts.scheme == 'file'):
        if os.path.isfile(urlparts.path):
            path = os.path.abspath(urlparts.path)
            path = urlparse.urljoin('<STR_LIT>',
                                    urllib.pathname2url(path))
            site = '<STR_LIT>'
    fil = File(os.path.basename(path))
    fil.PFN(path, site)
    return fil
Takes a path and returns a File object with the path as the PFN.
f16006:c5:m8
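A hedged usage sketch: if the path points at a local, existing file, it is resolved to an absolute file:// URL and registered as a PFN on the local site. The import path follows the pycbc.workflow.pegasus_workflow module named in the docstrings above, and the frame-file path is purely illustrative:

from pycbc.workflow.pegasus_workflow import File  # assumed import path

fil = File.from_path('/data/frames/H-H1_FRAME-1000000000-4096.gwf')
# 'fil' now carries a file:// PFN for the local site.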
def get_science_segments(workflow, out_dir, tags=None):
    if tags is None:
        tags = []
    logging.info('<STR_LIT>')
    make_analysis_dir(out_dir)
    start_time = workflow.analysis_time[0]
    end_time = workflow.analysis_time[1]
    sci_seg_name = "<STR_LIT>"
    sci_segs = {}
    sci_seg_dict = segments.segmentlistdict()
    sci_seg_summ_dict = segments.segmentlistdict()
    for ifo in workflow.ifos:
        curr_sci_segs, curr_sci_xml, curr_seg_name = get_sci_segs_for_ifo(
            ifo, workflow.cp, start_time, end_time, out_dir, tags)
        sci_seg_dict[ifo + ':' + sci_seg_name] = curr_sci_segs
        sci_segs[ifo] = curr_sci_segs
        sci_seg_summ_dict[ifo + ':' + sci_seg_name] = \
            curr_sci_xml.seg_summ_dict[ifo + ':' + curr_seg_name]
    sci_seg_file = SegFile.from_segment_list_dict(
        sci_seg_name, sci_seg_dict, extension='<STR_LIT>',
        valid_segment=workflow.analysis_time,
        seg_summ_dict=sci_seg_summ_dict,
        directory=out_dir, tags=tags)
    logging.info('<STR_LIT>')
    return sci_seg_file, sci_segs, sci_seg_name
Get the analyzable segments after applying ini specified vetoes.

Parameters
----------
workflow : Workflow object
    Instance of the workflow object
out_dir : path
    Location to store output files
tags : list of strings
    Used to retrieve subsections of the ini file for
    configuration options.

Returns
-------
sci_seg_file : workflow.core.SegFile instance
    The segment file combined from all ifos containing the science
    segments.
sci_segs : Ifo keyed dict of ligo.segments.segmentlist instances
    The science segs for each ifo, keyed by ifo
sci_seg_name : str
    The name with which science segs are stored in the output XML file.
f16007:m0
def get_files_for_vetoes(workflow, out_dir,
                         runtime_names=None, in_workflow_names=None,
                         tags=None):
    if tags is None:
        tags = []
    if runtime_names is None:
        runtime_names = []
    if in_workflow_names is None:
        in_workflow_names = []
    logging.info('<STR_LIT>')
    make_analysis_dir(out_dir)
    start_time = workflow.analysis_time[0]
    end_time = workflow.analysis_time[1]
    save_veto_definer(workflow.cp, out_dir, tags)
    now_cat_sets = []
    for name in runtime_names:
        cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags(
            '<STR_LIT>', name, tags))
        now_cat_sets.extend(cat_sets)
    now_cats = set()
    for cset in now_cat_sets:
        now_cats = now_cats.union(cset)
    later_cat_sets = []
    for name in in_workflow_names:
        cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags(
            '<STR_LIT>', name, tags))
        later_cat_sets.extend(cat_sets)
    later_cats = set()
    for cset in later_cat_sets:
        later_cats = later_cats.union(cset)
        later_cats = later_cats - now_cats
    veto_gen_job = create_segs_from_cats_job(workflow.cp, out_dir,
                                             workflow.ifo_string, tags=tags)
    cat_files = FileList()
    for ifo in workflow.ifos:
        for category in now_cats:
            cat_files.append(get_veto_segs(workflow, ifo,
                                           cat_to_veto_def_cat(category),
                                           start_time, end_time, out_dir,
                                           veto_gen_job, execute_now=True,
                                           tags=tags))
        for category in later_cats:
            cat_files.append(get_veto_segs(workflow, ifo,
                                           cat_to_veto_def_cat(category),
                                           start_time, end_time, out_dir,
                                           veto_gen_job, tags=tags,
                                           execute_now=False))
    logging.info('<STR_LIT>')
    return cat_files
Get the various sets of veto segments that will be used in this analysis.

Parameters
----------
workflow : Workflow object
    Instance of the workflow object
out_dir : path
    Location to store output files
runtime_names : list
    Veto category groups with these names in the [workflow-segment]
    section of the ini file will be generated now.
in_workflow_names : list
    Veto category groups with these names in the [workflow-segment]
    section of the ini file will be generated in the workflow. If a veto
    category appears here and in runtime_names, it will be generated now.
tags : list of strings
    Used to retrieve subsections of the ini file for
    configuration options.

Returns
-------
veto_seg_files : FileList
    List of veto segment files generated
f16007:m1
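Stripped of the ini-file plumbing, the runtime/in-workflow split is plain set arithmetic: any category already named for runtime generation is dropped from the in-workflow group. For example:

now_cats = set('1') | set('2')                # generated at runtime
later_cats = (set('12') | set('3')) - now_cats
assert later_cats == {'3'}                    # '1' and '2' already covered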
def get_analyzable_segments(workflow, sci_segs, cat_files, out_dir, tags=None):
    if tags is None:
        tags = []
    logging.info('<STR_LIT>')
    make_analysis_dir(out_dir)
    sci_ok_seg_name = "<STR_LIT>"
    sci_ok_seg_dict = segments.segmentlistdict()
    sci_ok_segs = {}
    cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('<STR_LIT>',
                                                          '<STR_LIT>', tags))
    if len(cat_sets) > 1:
        raise ValueError('<STR_LIT>'
                         '<STR_LIT>')
    cat_set = cat_sets[0]
    for ifo in workflow.ifos:
        curr_segs = copy.copy(sci_segs[ifo])
        files = cat_files.find_output_with_ifo(ifo)
        for category in cat_set:
            veto_def_cat = cat_to_veto_def_cat(category)
            file_list = files.find_output_with_tag('<STR_LIT>' % (veto_def_cat))
            if len(file_list) > 1:
                err_msg = "<STR_LIT>" % (ifo,)
                err_msg += "<STR_LIT>" % (category,)
                raise ValueError(err_msg)
            if len(file_list) == 0:
                err_msg = "<STR_LIT>" % (ifo,)
                err_msg += "<STR_LIT>" % (category,)
                raise ValueError(err_msg)
            curr_veto_file = file_list[0]
            cat_segs = curr_veto_file.return_union_seglist()
            curr_segs -= cat_segs
            curr_segs.coalesce()
        sci_ok_seg_dict[ifo + ':' + sci_ok_seg_name] = curr_segs
    sci_ok_seg_file = SegFile.from_segment_list_dict(
        sci_ok_seg_name, sci_ok_seg_dict, extension='<STR_LIT>',
        valid_segment=workflow.analysis_time,
        directory=out_dir, tags=tags)
    if workflow.cp.has_option_tags("<STR_LIT>",
                                   "<STR_LIT>", tags):
        min_seg_length = int(workflow.cp.get_opt_tags("<STR_LIT>",
                                                      "<STR_LIT>", tags))
        sci_ok_seg_file.remove_short_sci_segs(min_seg_length)
    for ifo in workflow.ifos:
        sci_ok_segs[ifo] = \
            sci_ok_seg_file.segment_dict[ifo + ':' + sci_ok_seg_name]
    logging.info('<STR_LIT>')
    return sci_ok_seg_file, sci_ok_segs, sci_ok_seg_name
Get the analyzable segments after applying ini specified vetoes and any
other restrictions on the science segs, e.g. a minimum segment length, or
demanding that only coincident segments are analysed.

Parameters
----------
workflow : Workflow object
    Instance of the workflow object
sci_segs : Ifo-keyed dictionary of glue.segmentlists
    The science segments for each ifo to which the vetoes, or any other
    restriction, will be applied.
cat_files : FileList of SegFiles
    The category veto files generated by get_veto_segs
out_dir : path
    Location to store output files
tags : list of strings
    Used to retrieve subsections of the ini file for
    configuration options.

Returns
-------
sci_ok_seg_file : workflow.core.SegFile instance
    The segment file combined from all ifos containing the analyzable
    science segments.
sci_ok_segs : Ifo keyed dict of ligo.segments.segmentlist instances
    The analyzable science segs for each ifo, keyed by ifo
sci_ok_seg_name : str
    The name with which analyzable science segs are stored in the output
    XML file.
f16007:m2
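The per-ifo loop above is, at its core, segment subtraction followed by a coalesce. A minimal sketch, assuming the ligo.segments package is installed:

from ligo import segments

science = segments.segmentlist([segments.segment(0, 1000)])
veto = segments.segmentlist([segments.segment(200, 300),
                             segments.segment(250, 400)])
veto.coalesce()                 # subtraction expects coalesced lists
analyzable = science - veto
analyzable.coalesce()
print(analyzable)               # [segment(0, 200), segment(400, 1000)]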
def get_cumulative_veto_group_files(workflow, option, cat_files,
                                    out_dir, execute_now=True, tags=None):
    if tags is None:
        tags = []
    logging.info("<STR_LIT>" % (option))
    make_analysis_dir(out_dir)
    cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('<STR_LIT>',
                                                          option, tags))
    cum_seg_files = FileList()
    names = []
    for cat_set in cat_sets:
        segment_name = "<STR_LIT>" % ('<STR_LIT>'.join(sorted(cat_set)))
        logging.info('<STR_LIT>' % segment_name)
        categories = [cat_to_veto_def_cat(c) for c in cat_set]
        cum_seg_files += [get_cumulative_segs(workflow, categories, cat_files,
                                              out_dir,
                                              execute_now=execute_now,
                                              segment_name=segment_name,
                                              tags=tags)]
        names.append(segment_name)
    logging.info("<STR_LIT>" % (option))
    return cum_seg_files, names, cat_files
Get the cumulative veto files that define the different backgrounds we
want to analyze, defined by groups of vetos.

Parameters
----------
workflow : Workflow object
    Instance of the workflow object
option : str
    ini file option to use to get the veto groups
cat_files : FileList of SegFiles
    The category veto files generated by get_veto_segs
out_dir : path
    Location to store output files
execute_now : Boolean
    If true outputs are generated at runtime. Else jobs go into the
    workflow and are generated then.
tags : list of strings
    Used to retrieve subsections of the ini file for
    configuration options.

Returns
-------
seg_files : workflow.core.FileList instance
    The cumulative segment files for each veto group.
names : list of strings
    The segment names for the corresponding seg_file
cat_files : workflow.core.FileList instance
    The list of individual category veto files
f16007:m3
def setup_segment_generation(workflow, out_dir, tag=None):
    logging.info("<STR_LIT>")
    make_analysis_dir(out_dir)
    cp = workflow.cp
    segmentsMethod = cp.get_opt_tags("<STR_LIT>",
                                     "<STR_LIT>", [tag])
    if segmentsMethod in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',
                          '<STR_LIT>']:
        veto_cats = cp.get_opt_tags("<STR_LIT>",
                                    "<STR_LIT>", [tag])
        max_veto_cat = max([int(c) for c in veto_cats.split(',')])
        veto_categories = range(1, max_veto_cat + 1)
        if cp.has_option_tags("<STR_LIT>",
                              "<STR_LIT>", [tag]):
            generate_coincident_segs = True
        else:
            generate_coincident_segs = False
        vetoDefUrl = cp.get_opt_tags("<STR_LIT>",
                                     "<STR_LIT>", [tag])
        vetoDefBaseName = os.path.basename(vetoDefUrl)
        vetoDefNewPath = os.path.join(out_dir, vetoDefBaseName)
        resolve_url(vetoDefUrl, out_dir)
        cp.set("<STR_LIT>", "<STR_LIT>",
               vetoDefNewPath)
    if cp.has_option_tags("<STR_LIT>",
                          "<STR_LIT>", [tag]):
        minSegLength = int(cp.get_opt_tags("<STR_LIT>",
                                           "<STR_LIT>", [tag]))
    else:
        minSegLength = 0
    if segmentsMethod == "<STR_LIT>":
        max_veto = 1000
    elif segmentsMethod == "<STR_LIT>":
        max_veto = 1
    elif segmentsMethod == "<STR_LIT>":
        max_veto = 2
    elif segmentsMethod == "<STR_LIT>":
        max_veto = 3
    else:
        msg = "<STR_LIT>"
        msg += "<STR_LIT>"
        msg += "<STR_LIT>"
        raise ValueError(msg)
    logging.info("<STR_LIT>")
    segFilesList = setup_segment_gen_mixed(
        workflow, veto_categories, out_dir, max_veto, tag=tag,
        generate_coincident_segs=generate_coincident_segs)
    logging.info("<STR_LIT>")
    segsToAnalyse = {}
    for ifo in workflow.ifos:
        analSegs = segFilesList.find_output_with_ifo(ifo)
        analSegs = analSegs.find_output_with_tag('<STR_LIT>')
        assert len(analSegs) == 1
        analSegs = analSegs[0]
        if analSegs.segment_list:
            if minSegLength:
                analSegs.remove_short_sci_segs(minSegLength)
                analSegs.to_segment_xml(override_file_if_exists=True)
            segsToAnalyse[ifo] = analSegs.segment_list
        else:
            msg = "<STR_LIT>" % (ifo)
            msg += "<STR_LIT>"
            msg += "<STR_LIT>" % (out_dir)
            msg += "<STR_LIT>"
            msg += "<STR_LIT>" % (os.path.join(out_dir, '<STR_LIT>'))
            logging.warn(msg)
    logging.info("<STR_LIT>")
    return segsToAnalyse, segFilesList
This function is the gateway for setting up the segment generation steps
in a workflow. It is designed to be able to support multiple ways of
obtaining these segments and to combine/edit such files as necessary for
analysis. The current modules have the capability to generate files at
runtime or to generate files that are not needed for workflow generation
within the workflow.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    The workflow instance that the coincidence jobs will be added to.
    This instance also contains the ifos for which to attempt to obtain
    segments for this analysis and the start and end times to search for
    segments over.
out_dir : path
    The directory in which output will be stored.
tag : string, optional (default=None)
    Use this to specify a tag. This can be used if this module is being
    called more than once to give call specific configuration (by setting
    options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class to uniqueify the Files and uniqueify the actual filename.
    FIXME: Filenames may not be unique with current codes!

Returns
-------
segsToAnalyse : dictionary of ifo-keyed glue.segment.segmentlist instances
    This will contain the times that your code should analyse. By default
    this is science time - CAT_1 vetoes. (This default could be changed
    if desired)
segFilesList : pycbc.workflow.core.FileList of SegFile instances
    These are representations of the various segment files that were
    constructed at this stage of the workflow and may be needed at later
    stages of the analysis (e.g. for performing DQ vetoes). If the file
    was generated at run-time the segment lists contained within these
    files will be an attribute of the instance. (If it will be generated
    in the workflow it will not be because I am not psychic).
f16007:m4
def setup_segment_gen_mixed(workflow, veto_categories, out_dir,
                            maxVetoAtRunTime, tag=None,
                            generate_coincident_segs=True):
    cp = workflow.cp
    segFilesList = FileList([])
    start_time = workflow.analysis_time[0]
    end_time = workflow.analysis_time[1]
    segValidSeg = workflow.analysis_time
    vetoGenJob = create_segs_from_cats_job(cp, out_dir, workflow.ifo_string)
    for ifo in workflow.ifos:
        logging.info("<STR_LIT>" % (ifo))
        currSciSegs, currSciXmlFile, _ = get_sci_segs_for_ifo(
            ifo, cp, start_time, end_time, out_dir, tags=tag)
        segFilesList.append(currSciXmlFile)
        for category in veto_categories:
            if category > maxVetoAtRunTime:
                msg = "<STR_LIT>" % (category)
                msg += "<STR_LIT>" % (ifo)
                logging.info(msg)
                execute_status = False
            if category <= maxVetoAtRunTime:
                logging.info("<STR_LIT>" % (category, ifo))
                execute_status = True
            currVetoXmlFile = get_veto_segs(workflow, ifo, category,
                                            start_time, end_time, out_dir,
                                            vetoGenJob,
                                            execute_now=execute_status)
            segFilesList.append(currVetoXmlFile)
            if category == 1:
                cat1Segs = currVetoXmlFile.return_union_seglist()
        analysedSegs = currSciSegs - cat1Segs
        analysedSegs.coalesce()
        analysedSegDict = segments.segmentlistdict()
        analysedSegDict[ifo + '<STR_LIT>'] = analysedSegs
        analysedXmlFile = os.path.join(out_dir,
                                       "<STR_LIT>" % (ifo.upper()))
        currUrl = urlparse.urlunparse(['file', 'localhost', analysedXmlFile,
                                       None, None, None])
        if tag:
            currTags = [tag, '<STR_LIT>']
        else:
            currTags = ['<STR_LIT>']
        currFile = SegFile(ifo, '<STR_LIT>', analysedSegs,
                           segment_dict=analysedSegDict, file_url=currUrl,
                           tags=currTags)
        segFilesList.append(currFile)
        currFile.to_segment_xml()
    if generate_coincident_segs:
        ifo_string = workflow.ifo_string
        categories = []
        cum_cat_files = []
        for category in veto_categories:
            categories.append(category)
            if tag:
                currTags = [tag]
            else:
                currTags = []
            logging.info("<STR_LIT>" % (category))
            if category <= maxVetoAtRunTime:
                execute_status = True
            else:
                execute_status = False
            currSegFile = get_cumulative_segs(workflow, categories,
                                              segFilesList, out_dir,
                                              execute_now=execute_status,
                                              tags=currTags)
            segFilesList.append(currSegFile)
            cum_cat_files.append(currSegFile)
        if tag:
            currTags = [tag, '<STR_LIT>']
        else:
            currTags = ['<STR_LIT>']
        combined_veto_file = os.path.join(out_dir,
                                          '<STR_LIT>' % (ifo_string))
        curr_url = urlparse.urlunparse(['file', 'localhost',
                                        combined_veto_file,
                                        None, None, None])
        curr_file = SegFile(ifo_string, '<STR_LIT>', segValidSeg,
                            file_url=curr_url, tags=currTags)
        for category in veto_categories:
            if category <= maxVetoAtRunTime:
                execute_status = True
                break
        else:
            execute_status = False
        add_cumulative_files(workflow, curr_file, cum_cat_files, out_dir,
                             execute_now=execute_status)
        segFilesList.append(curr_file)
    return segFilesList
This function will generate veto files for each ifo and for each veto
category. It can generate these vetoes at run-time or in the workflow (or
do some at run-time and some in the workflow). However, the CAT_1 vetoes
and science time must be generated at run time as they are needed to plan
the workflow. CATs 2 and higher *may* be needed for other workflow
construction. It can also combine these files to create a set of
cumulative, multi-detector veto files, which can be used in ligolw_thinca
and in pipedown. Again these can be created at run time or within the
workflow.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    The Workflow instance that the coincidence jobs will be added to.
    This instance also contains the ifos for which to attempt to obtain
    segments for this analysis and the start and end times to search for
    segments over.
veto_categories : list of ints
    List of veto categories to generate segments for. If this stops being
    integers, this can be changed here.
out_dir : path
    The directory in which output will be stored.
maxVetoAtRunTime : int
    Generate veto files at run time up to this category. Veto categories
    beyond this in veto_categories will be generated in the workflow. If
    we move to a model where veto categories are not explicitly
    cumulative, this will be rethought.
tag : string, optional (default=None)
    Use this to specify a tag. This can be used if this module is being
    called more than once to give call specific configuration (by setting
    options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class to uniqueify the Files and uniqueify the actual filename.
    FIXME: Filenames may not be unique with current codes!
generate_coincident_segs : boolean, optional (default = True)
    If given this module will generate a set of coincident, cumulative
    veto files that can be used with ligolw_thinca and pipedown.

Returns
-------
segFilesList : dictionary of pycbc.workflow.core.SegFile instances
    These are representations of the various segment files that were
    constructed at this stage of the workflow and may be needed at later
    stages of the analysis (e.g. for performing DQ vetoes). If the file
    was generated at run-time the segment lists contained within these
    files will be an attribute of the instance. (If it will be generated
    in the workflow it will not be because I am not psychic).
f16007:m5
def get_sci_segs_for_ifo(ifo, cp, start_time, end_time, out_dir, tags=None):
    if tags is None:
        tags = []
    seg_valid_seg = segments.segment([start_time, end_time])
    sci_seg_name = cp.get_opt_tags(
        "<STR_LIT>", "<STR_LIT>" % (ifo.lower()), tags)
    sci_seg_url = cp.get_opt_tags(
        "<STR_LIT>", "<STR_LIT>", tags)
    out_sci_seg_name = "<STR_LIT>"
    if tags:
        sci_xml_file_path = os.path.join(
            out_dir, "<STR_LIT>" % (ifo.upper(), '_'.join(tags)))
        tag_list = tags + ['<STR_LIT>']
    else:
        sci_xml_file_path = os.path.join(
            out_dir, "<STR_LIT>" % (ifo.upper()))
        tag_list = ['<STR_LIT>']
    if file_needs_generating(sci_xml_file_path, cp, tags=tags):
        seg_find_call = [resolve_url(cp.get("<STR_LIT>", "<STR_LIT>"),
                                     permissions=stat.S_IRUSR |
                                                 stat.S_IWUSR |
                                                 stat.S_IXUSR),
                         "<STR_LIT>",
                         "<STR_LIT>", sci_seg_url,
                         "<STR_LIT>", str(start_time),
                         "<STR_LIT>", str(end_time),
                         "<STR_LIT>", sci_seg_name,
                         "<STR_LIT>", sci_xml_file_path]
        make_external_call(seg_find_call,
                           out_dir=os.path.join(out_dir, '<STR_LIT>'),
                           out_basename='<STR_LIT>' % (ifo.lower()))
    sci_xml_file_path = os.path.abspath(sci_xml_file_path)
    sci_xml_file = SegFile.from_segment_xml(sci_xml_file_path, tags=tag_list,
                                            valid_segment=seg_valid_seg)
    sci_xml_file.seg_summ_dict[ifo.upper() + ":" + out_sci_seg_name] = \
        sci_xml_file.seg_summ_dict[':'.join(sci_seg_name.split(':')[0:2])]
    sci_segs = sci_xml_file.return_union_seglist()
    return sci_segs, sci_xml_file, out_sci_seg_name
Obtain science segments for the selected ifo.

Parameters
----------
ifo : string
    The string describing the ifo to obtain science times for.
cp : pycbc.workflow.configuration.WorkflowConfigParser
    The in-memory representation of the configuration (.ini) files.
start_time : gps time (either int/LIGOTimeGPS)
    The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
    The time at which to stop searching for segments.
out_dir : path
    The directory in which output will be stored.
tags : list of strings, optional (default=None)
    Use this to specify tags. This can be used if this module is being
    called more than once to give call specific configuration (by setting
    options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class to uniqueify the Files and uniqueify the actual filename.

Returns
-------
sci_segs : ligo.segments.segmentlist
    The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
    The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
    The name of the output segment list in the output XML file.
f16007:m6
def get_veto_segs(workflow, ifo, category, start_time, end_time, out_dir,
                  veto_gen_job, tags=None, execute_now=False):
    if tags is None:
        tags = []
    seg_valid_seg = segments.segment([start_time, end_time])
    node = Node(veto_gen_job)
    node.add_opt('<STR_LIT>', str(category))
    node.add_opt('<STR_LIT>', ifo)
    node.add_opt('<STR_LIT>', str(start_time))
    node.add_opt('<STR_LIT>', str(end_time))
    if tags:
        veto_xml_file_name = "<STR_LIT>" % (ifo, category, '_'.join(tags),
                                            start_time,
                                            end_time - start_time)
    else:
        veto_xml_file_name = "<STR_LIT>" % (ifo, category, start_time,
                                            end_time - start_time)
    veto_xml_file_path = os.path.abspath(os.path.join(out_dir,
                                                      veto_xml_file_name))
    curr_url = urlparse.urlunparse(['file', 'localhost',
                                    veto_xml_file_path, None, None, None])
    if tags:
        curr_tags = tags + ['<STR_LIT>' % (category)]
    else:
        curr_tags = ['<STR_LIT>' % (category)]
    if file_needs_generating(veto_xml_file_path, workflow.cp, tags=tags):
        if execute_now:
            workflow.execute_node(node, verbatim_exe=True)
            veto_xml_file = SegFile.from_segment_xml(
                veto_xml_file_path, tags=curr_tags,
                valid_segment=seg_valid_seg)
        else:
            veto_xml_file = SegFile(ifo, '<STR_LIT>', seg_valid_seg,
                                    file_url=curr_url, tags=curr_tags)
            node._add_output(veto_xml_file)
            workflow.add_node(node)
    else:
        node.executed = True
        for fil in node._outputs:
            fil.node = None
        veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,
                                                 tags=curr_tags,
                                                 valid_segment=seg_valid_seg)
    return veto_xml_file
Obtain veto segments for the selected ifo and veto category and add the
job to generate this to the workflow.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    An instance of the Workflow class that manages the workflow.
ifo : string
    The string describing the ifo to generate vetoes for.
category : int
    The veto category to generate vetoes for.
start_time : gps time (either int/LIGOTimeGPS)
    The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
    The time at which to stop searching for segments.
out_dir : path
    The directory in which output will be stored.
veto_gen_job : Job
    The veto generation Job class that will be used to create the Node.
tags : list of strings, optional (default=None)
    Use this to specify tags. This can be used if this module is being
    called more than once to give call specific configuration (by setting
    options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class to uniqueify the Files and uniqueify the actual filename.
    FIXME: Filenames may not be unique with current codes!
execute_now : boolean, optional
    If true, jobs are executed immediately. If false, they are added to
    the workflow to be run later.

Returns
-------
veto_def_file : pycbc.workflow.core.SegFile
    The workflow File object corresponding to this DQ veto file.
f16007:m7
def create_segs_from_cats_job(cp, out_dir, ifo_string, tags=None):
    if tags is None:
        tags = []
    seg_server_url = cp.get_opt_tags("<STR_LIT>",
                                     "<STR_LIT>", tags)
    veto_def_file = cp.get_opt_tags("<STR_LIT>",
                                    "<STR_LIT>", tags)
    job = Executable(cp, '<STR_LIT>', universe='<STR_LIT>',
                     ifos=ifo_string, out_dir=out_dir, tags=tags)
    job.add_opt('<STR_LIT>')
    job.add_opt('<STR_LIT>', seg_server_url)
    job.add_opt('<STR_LIT>', veto_def_file)
    return job
This function creates the CondorDAGJob that will be used to run
ligolw_segments_from_cats as part of the workflow.

Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser
    The in-memory representation of the configuration (.ini) files
out_dir : path
    Directory in which to put output files
ifo_string : string
    String containing all active ifos, i.e. "H1L1V1"
tags : list of strings, optional (default=None)
    Use this to specify a tag(s). This can be used if this module is
    being called more than once to give call specific configuration (by
    setting options in [workflow-datafind-${TAG}] rather than
    [workflow-datafind]). This is also used to tag the Files returned by
    the class to uniqueify the Files and uniqueify the actual filename.
    FIXME: Filenames may not be unique with current codes!

Returns
-------
job : Job instance
    The Job instance that will run segments_from_cats jobs
f16007:m8
def get_cumulative_segs(workflow, categories, seg_files_list, out_dir,
                        tags=None, execute_now=False, segment_name=None):
    if tags is None:
        tags = []
    add_inputs = FileList([])
    valid_segment = workflow.analysis_time
    if segment_name is None:
        segment_name = '<STR_LIT>' % (categories[-1])
    cp = workflow.cp
    for ifo in workflow.ifos:
        cum_job = LigoLWCombineSegsExecutable(cp, '<STR_LIT>',
                                              out_dir=out_dir,
                                              tags=[segment_name] + tags,
                                              ifos=ifo)
        inputs = []
        files = seg_files_list.find_output_with_ifo(ifo)
        for category in categories:
            file_list = files.find_output_with_tag('<STR_LIT>' % (category))
            inputs += file_list
        cum_node = cum_job.create_node(valid_segment, inputs, segment_name)
        if file_needs_generating(cum_node.output_files[0].cache_entry.path,
                                 workflow.cp, tags=tags):
            if execute_now:
                workflow.execute_node(cum_node)
            else:
                workflow.add_node(cum_node)
        else:
            cum_node.executed = True
            for fil in cum_node._outputs:
                fil.node = None
                fil.PFN(urlparse.urljoin(
                            '<STR_LIT>',
                            urllib.pathname2url(fil.storage_path)),
                        site='<STR_LIT>')
        add_inputs += cum_node.output_files
    name = '<STR_LIT>' % (segment_name)
    outfile = File(workflow.ifos, name, workflow.analysis_time,
                   directory=out_dir, extension='<STR_LIT>',
                   tags=[segment_name] + tags)
    add_job = LigolwAddExecutable(cp, '<STR_LIT>', ifos=ifo, out_dir=out_dir,
                                  tags=tags)
    add_node = add_job.create_node(valid_segment, add_inputs, output=outfile)
    if file_needs_generating(add_node.output_files[0].cache_entry.path,
                             workflow.cp, tags=tags):
        if execute_now:
            workflow.execute_node(add_node)
        else:
            workflow.add_node(add_node)
    else:
        add_node.executed = True
        for fil in add_node._outputs:
            fil.node = None
            fil.PFN(urlparse.urljoin(
                        '<STR_LIT>',
                        urllib.pathname2url(fil.storage_path)),
                    site='<STR_LIT>')
    return outfile
Function to generate one of the cumulative, multi-detector segment files
as part of the workflow.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    An instance of the Workflow class that manages the workflow.
categories : list of ints
    The veto categories to include in this cumulative veto.
seg_files_list : Listionary of SegFiles
    The list of segment files to be used as input for combining.
out_dir : path
    The directory to write output to.
tags : list of strings, optional
    A list of strings that is used to identify this job
execute_now : boolean, optional
    If true, jobs are executed immediately. If false, they are added to
    the workflow to be run later.
segment_name : str
    The name of the combined, cumulative segments in the output file.
f16007:m9
def add_cumulative_files(workflow, output_file, input_files, out_dir,
                         execute_now=False, tags=None):
    if tags is None:
        tags = []
    llwadd_job = LigolwAddExecutable(workflow.cp, '<STR_LIT>',
                                     ifo=output_file.ifo_list,
                                     out_dir=out_dir, tags=tags)
    add_node = llwadd_job.create_node(output_file.segment, input_files,
                                      output=output_file)
    if file_needs_generating(add_node.output_files[0].cache_entry.path,
                             workflow.cp, tags=tags):
        if execute_now:
            workflow.execute_node(add_node)
        else:
            workflow.add_node(add_node)
    else:
        add_node.executed = True
        for fil in add_node._outputs:
            fil.node = None
            fil.PFN(urlparse.urljoin(
                        '<STR_LIT>',
                        urllib.pathname2url(fil.storage_path)),
                    site='<STR_LIT>')
    return add_node.output_files[0]
Function to combine a set of segment files into a single one. This
function will not merge the segment lists but keep each separate.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    An instance of the Workflow class that manages the workflow.
output_file : pycbc.workflow.core.File
    The output file object
input_files : pycbc.workflow.core.FileList
    The list of input segment files
out_dir : path
    The directory to write output to.
execute_now : boolean, optional
    If true, jobs are executed immediately. If false, they are added to
    the workflow to be run later.
tags : list of strings, optional
    A list of strings that is used to identify this job
f16007:m10
def find_playground_segments(segs):
    start_s2 = <NUM_LIT>
    playground_stride = <NUM_LIT>
    playground_length = <NUM_LIT>
    outlist = segments.segmentlist()
    for seg in segs:
        start = seg[0]
        end = seg[1]
        playground_start = start_s2 + playground_stride * (
            1 + int(start - start_s2 - playground_length) / playground_stride)
        while playground_start < end:
            if playground_start > start:
                ostart = playground_start
            else:
                ostart = start
            playground_end = playground_start + playground_length
            if playground_end < end:
                oend = playground_end
            else:
                oend = end
            x = segments.segment(ostart, oend)
            outlist.append(x)
            playground_start = playground_start + playground_stride
    return outlist
Finds playground time in a list of segments. Playground segments include
the first 600s of every 6370s stride starting at GPS time 729273613.

Parameters
----------
segs : segmentfilelist
    A segmentfilelist to find playground segments.

Returns
-------
outlist : segmentfilelist
    A segmentfilelist with all playground segments during the input
    segmentfilelist (i.e. segs).
f16007:m11
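The playground rule stated in the docstring (the first 600 s of every 6370 s stride from GPS 729273613) can be checked with a plain-Python restatement; this sketch uses floor division explicitly, where the original relies on Python 2 integer division:

S2_START, STRIDE, LENGTH = 729273613, 6370, 600

def playground_in(start, end):
    out = []
    # first stride whose playground window can overlap [start, end)
    n = (start - S2_START - LENGTH) // STRIDE + 1
    t = S2_START + STRIDE * n
    while t < end:
        lo, hi = max(t, start), min(t + LENGTH, end)
        if lo < hi:
            out.append((lo, hi))
        t += STRIDE
    return out

print(playground_in(729273613, 729273613 + 2 * 6370))
# [(729273613, 729274213), (729279983, 729280583)]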
def get_triggered_coherent_segment(workflow, sciencesegs):
    cp = workflow.cp
    triggertime = int(os.path.basename(cp.get('<STR_LIT>', '<STR_LIT>')))
    minduration = int(os.path.basename(cp.get('<STR_LIT>',
                                              '<STR_LIT>')))
    maxduration = int(os.path.basename(cp.get('<STR_LIT>',
                                              '<STR_LIT>')))
    onbefore = int(os.path.basename(cp.get('<STR_LIT>',
                                           '<STR_LIT>')))
    onafter = int(os.path.basename(cp.get('<STR_LIT>',
                                          '<STR_LIT>')))
    padding = int(os.path.basename(cp.get('<STR_LIT>',
                                          '<STR_LIT>')))
    if cp.has_option("<STR_LIT>", "<STR_LIT>"):
        padding += int(os.path.basename(cp.get("<STR_LIT>",
                                               "<STR_LIT>")))
    quanta = int(os.path.basename(cp.get('<STR_LIT>',
                                         '<STR_LIT>')))
    commonsegs = sciencesegs.extract_common(sciencesegs.keys())
    offsrclist = commonsegs[commonsegs.keys()[0]]
    if len(offsrclist) > 1:
        logging.info("<STR_LIT>"
                     "time")
        for seg in offsrclist:
            if triggertime in seg:
                offsrc = seg
    else:
        offsrc = offsrclist[0]
    if abs(offsrc) < minduration + 2 * padding:
        fail = segments.segment([triggertime - minduration / <NUM_LIT> - padding,
                                 triggertime + minduration / <NUM_LIT> + padding])
        logging.warning("<STR_LIT>"
                        "<STR_LIT>")
        return None, fail
    if abs(offsrc) >= maxduration + 2 * padding:
        logging.info("<STR_LIT>"
                     "<STR_LIT>"
                     "<STR_LIT>" % (abs(offsrc), maxduration))
    else:
        logging.info("<STR_LIT>"
                     "<STR_LIT>"
                     % (abs(offsrc), maxduration))
    logging.info("<STR_LIT>"
                 % padding)
    onstart = triggertime - onbefore
    onend = triggertime + onafter
    oncentre = onstart + ((onbefore + onafter) / 2)
    onsrc = segments.segment(onstart, onend)
    logging.info("<STR_LIT>"
                 "<STR_LIT>"
                 % (abs(onsrc), triggertime - onsrc[0],
                    onsrc[1] - triggertime))
    onsrc = segments.segmentlist([onsrc])
    idealsegment = segments.segment(int(oncentre - padding -
                                        0.5 * maxduration),
                                    int(oncentre + padding +
                                        0.5 * maxduration))
    if (idealsegment in offsrc):
        offsrc = idealsegment
    elif idealsegment[1] not in offsrc:
        offsrc &= segments.segment(offsrc[1] - maxduration - 2 * padding,
                                   offsrc[1])
    elif idealsegment[0] not in offsrc:
        offsrc &= segments.segment(offsrc[0],
                                   offsrc[0] + maxduration + 2 * padding)
    excess = (abs(offsrc) - 2 * padding) % quanta
    if excess != 0:
        logging.info("<STR_LIT>"
                     "<STR_LIT>" % (excess, quanta))
        offset = (offsrc[0] + abs(offsrc) / <NUM_LIT>) - oncentre
        if 2 * abs(offset) > excess:
            if offset < 0:
                offsrc &= segments.segment(offsrc[0] + excess,
                                           offsrc[1])
            elif offset > 0:
                offsrc &= segments.segment(offsrc[0],
                                           offsrc[1] - excess)
            assert abs(offsrc) % quanta == 2 * padding
        else:
            logging.info("<STR_LIT>"
                         "<STR_LIT>")
            start = int(offsrc[0] - offset + excess / 2)
            end = int(offsrc[1] - offset - round(float(excess) / 2))
            offsrc = segments.segment(start, end)
            assert abs(offsrc) % quanta == 2 * padding
    logging.info("<STR_LIT>"
                 "<STR_LIT>"
                 % (abs(offsrc) - 2 * padding,
                    triggertime - offsrc[0] - padding,
                    offsrc[1] - triggertime - padding))
    offsrc = segments.segmentlist([offsrc])
    onsource = segments.segmentlistdict()
    offsource = segments.segmentlistdict()
    ifos = '<STR_LIT>'
    for iifo in sciencesegs.keys():
        ifos += str(iifo)
        onsource[iifo] = onsrc
        offsource[iifo] = offsrc
    return onsource, offsource
Construct the coherent network on and off source segments. Can switch to
construction of segments for a single IFO search when coherent segments
are insufficient for a search.

Parameters
----------
workflow : pycbc.workflow.core.Workflow
    The workflow instance that the calculated segments belong to.
sciencesegs : dict
    Dictionary of all science segments within analysis time.

Returns
-------
onsource : ligo.segments.segmentlistdict
    A dictionary containing the on source segments for network IFOs
offsource : ligo.segments.segmentlistdict
    A dictionary containing the off source segments for network IFOs
f16007:m12
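The final quantisation step trims the off-source segment so that its length, minus the padding at both ends, is a whole number of quanta. A worked numeric sketch with illustrative values:

padding, quanta = 8, 64                 # illustrative values
offsrc = (1000000000, 1000000500)       # 500 s off-source segment
excess = (offsrc[1] - offsrc[0] - 2 * padding) % quanta   # 484 % 64 = 36
# trim roughly half the excess from each end
start = offsrc[0] + excess // 2
end = offsrc[1] - (excess - excess // 2)
offsrc = (start, end)
assert (offsrc[1] - offsrc[0] - 2 * padding) % quanta == 0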
def save_veto_definer(cp, out_dir, tags=None):
    if tags is None:
        tags = []
    make_analysis_dir(out_dir)
    veto_def_url = cp.get_opt_tags("<STR_LIT>",
                                   "<STR_LIT>", tags)
    veto_def_base_name = os.path.basename(veto_def_url)
    veto_def_new_path = os.path.abspath(os.path.join(out_dir,
                                                     veto_def_base_name))
    resolve_url(veto_def_url, out_dir)
    cp.set("<STR_LIT>", "<STR_LIT>", veto_def_new_path)
    return veto_def_new_path
Retrieve the veto definer file and save it locally.

Parameters
----------
cp : ConfigParser instance
out_dir : path
tags : list of strings
    Used to retrieve subsections of the ini file for
    configuration options.
f16007:m14
def parse_cat_ini_opt(cat_str):
    if cat_str == "<STR_LIT>":
        return []
    cat_groups = cat_str.split(',')
    cat_sets = []
    for group in cat_groups:
        group = group.strip()
        cat_sets += [set(c for c in group)]
    return cat_sets
Parse a category string from the ini file into a list of sets.
f16007:m15
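A standalone restatement with the elided literals filled in by assumption (the comma delimiter is restored from the U+002C token; the empty-string sentinel is a guess):

def parse_cat_ini_opt(cat_str):
    if cat_str == '':                       # assumed empty-string sentinel
        return []
    return [set(group.strip()) for group in cat_str.split(',')]

print(parse_cat_ini_opt('1,12,12H'))
# e.g. [{'1'}, {'1', '2'}, {'1', '2', 'H'}] (set ordering varies)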
def cat_to_veto_def_cat(val):
    if val == '1':
        return 1
    if val == '2':
        return 2
    if val == '3':
        return 4
    if val == 'H':
        return 3
    else:
        raise ValueError('<STR_LIT>')
Convert a category character to the corresponding value in the veto
definer file.

Parameters
----------
val : single character string
    The input category character

Returns
-------
int
    The veto definer equivalent category number that can be passed to
    programs that expect this definition.
f16007:m16
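The mapping is small enough to state as a table; note the deliberate swap where category character '3' maps to veto-definer category 4 and 'H' to 3. An equivalent dict-based sketch, with values taken from the branches above:

CAT_MAP = {'1': 1, '2': 2, '3': 4, 'H': 3}

def cat_to_veto_def_cat(val):
    try:
        return CAT_MAP[val]
    except KeyError:
        raise ValueError('Unrecognised category %r' % (val,))

assert cat_to_veto_def_cat('3') == 4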
def file_needs_generating(file_path, cp, tags=None):
    if tags is None:
        tags = []
    if cp.has_option_tags("<STR_LIT>",
                          "<STR_LIT>", tags):
        value = cp.get_opt_tags("<STR_LIT>",
                                "<STR_LIT>", tags)
        generate_segment_files = value
    else:
        generate_segment_files = '<STR_LIT>'
    if os.path.isfile(file_path):
        if generate_segment_files in ['<STR_LIT>', '<STR_LIT>']:
            return 0
        elif generate_segment_files == '<STR_LIT>':
            err_msg = "<STR_LIT>" % (file_path,)
            err_msg += "<STR_LIT>"
            logging.warn(err_msg)
            return 1
        elif generate_segment_files == '<STR_LIT>':
            err_msg = "<STR_LIT>" % (file_path,)
            err_msg += "<STR_LIT>"
            raise ValueError(err_msg)
        else:
            err_msg = '<STR_LIT>'
            err_msg += '<STR_LIT>'
            err_msg += '<STR_LIT>' % (generate_segment_files,)
            raise ValueError(err_msg)
    else:
        if generate_segment_files in ['<STR_LIT>', '<STR_LIT>',
                                      '<STR_LIT>']:
            return 1
        elif generate_segment_files == '<STR_LIT>':
            err_msg = '<STR_LIT>' % (file_path,)
            raise ValueError(err_msg)
        else:
            err_msg = '<STR_LIT>'
            err_msg += '<STR_LIT>'
            err_msg += '<STR_LIT>' % (generate_segment_files,)
            raise ValueError(err_msg)
This function tests the file location and determines if the file should
be generated now or if an error should be raised. This uses the
generate_segment_files variable, global to this module, which is
described above and in the documentation.

Parameters
----------
file_path : path
    Location of file to check
cp : ConfigParser
    The associated ConfigParser from which the
    segments-generate-segment-files variable is returned.
    It is recommended for most applications to use the default option by
    leaving segments-generate-segment-files blank, which will regenerate
    all segment files at runtime. Only use this facility if you need it.
    Choices are
    * 'always' : DEFAULT: All files will be generated even if they
      already exist.
    * 'if_not_present': Files will be generated if they do not already
      exist. Pre-existing files will be read in and used.
    * 'error_on_duplicate': Files will be generated if they do not
      already exist. Pre-existing files will raise a failure.
    * 'never': Pre-existing files will be read in and used. If no file
      exists the code will fail.

Returns
-------
int
    1 = Generate the file. 0 = File already exists, use it. Other cases
    will raise an error.
f16007:m17
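With the literal tokens elided, the branch structure is easiest to read as a decision table. The mode names below are restored from the docstring, and the pairing of modes to branches is an inference from the docstring semantics, not from the code itself:

# (file exists, mode) -> action implied by the docstring
DECISION = {
    (True,  'always'):             'generate (1), with a warning',
    (True,  'if_not_present'):     'reuse (0)',
    (True,  'never'):              'reuse (0)',
    (True,  'error_on_duplicate'): 'raise ValueError',
    (False, 'always'):             'generate (1)',
    (False, 'if_not_present'):     'generate (1)',
    (False, 'error_on_duplicate'): 'generate (1)',
    (False, 'never'):              'raise ValueError',
}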
def get_segments_file(workflow, name, option_name, out_dir):
    from pycbc.dq import query_str
    make_analysis_dir(out_dir)
    cp = workflow.cp
    start = workflow.analysis_time[0]
    end = workflow.analysis_time[1]
    veto_definer = None
    if cp.has_option("<STR_LIT>", "<STR_LIT>"):
        veto_definer = save_veto_definer(workflow.cp, out_dir, [])
    server = "<STR_LIT>"
    if cp.has_option("<STR_LIT>", "<STR_LIT>"):
        server = cp.get("<STR_LIT>",
                        "<STR_LIT>")
    segs = {}
    for ifo in workflow.ifos:
        flag_str = cp.get_opt_tags("<STR_LIT>", option_name, [ifo])
        key = ifo + ':' + name
        segs[key] = query_str(ifo, flag_str, start, end,
                              server=server,
                              veto_definer=veto_definer)
        logging.info("<STR_LIT>", ifo, option_name)
    return SegFile.from_segment_list_dict(name, segs,
                                          extension='<STR_LIT>',
                                          valid_segment=workflow.analysis_time,
                                          directory=out_dir)
Get cumulative segments from option name syntax for each ifo.

Use the syntax of the configparser string to define the resulting
segment_file, e.g.
option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2
Each ifo may have a different string and is stored separately in the
file. Flags which add time must precede flags which subtract time.

Parameters
----------
workflow : pycbc.workflow.Workflow
name : string
    Name of the segment list being created
option_name : str
    Name of option in the associated config parser to get the flag list

Returns
-------
seg_file : pycbc.workflow.SegFile
    SegFile instance that points to the segment xml file on disk.
f16007:m18
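A hypothetical sketch of the +/- flag syntax described in the docstring: split a query string into additive and subtractive flags (parsing only; in pycbc the actual query and time arithmetic are done by pycbc.dq.query_str). The flag names are illustrative:

def split_flag_str(flag_str):
    up, down = [], []
    for tok in flag_str.split(','):
        tok = tok.strip()
        (up if tok.startswith('+') else down).append(tok.lstrip('+-'))
    return up, down

print(split_flag_str('+DMT-ANALYSIS_READY,-DCH-BAD_STRAIN'))
# (['DMT-ANALYSIS_READY'], ['DCH-BAD_STRAIN'])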
def setup_foreground_inference(workflow, coinc_file, single_triggers,
                               tmpltbank_file, insp_segs, insp_data_name,
                               insp_anal_name, dax_output, out_dir,
                               tags=None):
    logging.info("<STR_LIT>")
    if not workflow.cp.has_section("<STR_LIT>"):
        logging.info("<STR_LIT>")
        logging.info("<STR_LIT>")
        return
    tags = [] if tags is None else tags
    makedir(dax_output)
    config_path = os.path.abspath(dax_output + "/" + "_".join(tags) +
                                  "<STR_LIT>")
    workflow.cp.write(open(config_path, "w"))
    config_file = wdax.File(os.path.basename(config_path))
    config_file.PFN(config_path, "<STR_LIT>")
    exe = Executable(workflow.cp, "<STR_LIT>", ifos=workflow.ifos,
                     out_dir=dax_output)
    node = exe.create_node()
    node.add_input_opt("<STR_LIT>", config_file)
    node.add_input_opt("<STR_LIT>", tmpltbank_file)
    node.add_input_opt("<STR_LIT>", coinc_file)
    node.add_multiifo_input_list_opt("<STR_LIT>",
                                     single_triggers)
    node.new_output_file_opt(workflow.analysis_time, "<STR_LIT>", "<STR_LIT>",
                             tags=tags)
    node.new_output_file_opt(workflow.analysis_time, "<STR_LIT>",
                             "<STR_LIT>", tags=tags)
    node.new_output_file_opt(workflow.analysis_time, "<STR_LIT>",
                             "<STR_LIT>", tags=tags)
    name = node.output_files[0].name
    node.add_opt("<STR_LIT>", name)
    map_file = node.output_files[1]
    node.add_opt("<STR_LIT>", out_dir)
    tc_file = node.output_files[2]
    workflow += node
    fil = node.output_files[0]
    job = dax.DAX(fil)
    job.addArguments("<STR_LIT>"
                     % os.path.splitext(os.path.basename(name))[0])
    Workflow.set_job_properties(job, map_file, tc_file)
    workflow._adag.addJob(job)
    dep = dax.Dependency(parent=node._dax_node, child=job)
    workflow._adag.addDependency(dep)
    logging.info("<STR_LIT>")
Creates a workflow node that will run the inference workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
coinc_file : pycbc.workflow.File
    The file associated with coincident triggers.
single_triggers : list of pycbc.workflow.File
    A list containing the file objects associated with the merged
    single detector trigger files for each ifo.
tmpltbank_file : pycbc.workflow.File
    The file object pointing to the HDF format template bank
insp_segs : SegFile
    The segment file containing the data read and analyzed by each
    inspiral job.
insp_data_name : str
    The name of the segmentlist storing data read.
insp_anal_name : str
    The name of the segmentlist storing data analyzed.
dax_output : str
    The name of the output DAX file.
out_dir : path
    The directory to store inference result plots and files
tags : {None, optional}
    Tags to add to the inference executables
f16008:m0
def make_inference_prior_plot(workflow, config_file, output_dir,
                              sections=None, name="<STR_LIT>",
                              analysis_seg=None, tags=None):
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
        if analysis_seg is None else analysis_seg
    makedir(output_dir)
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, universe="<STR_LIT>",
                          tags=tags).create_node()
    node.add_input_opt("<STR_LIT>", config_file)
    node.new_output_file_opt(analysis_seg, "<STR_LIT>", "<STR_LIT>")
    if sections is not None:
        node.add_opt("<STR_LIT>", " ".join(sections))
    workflow += node
    return node.output_files
Sets up the corner plot of the priors in the workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
config_file : pycbc.workflow.File
    The WorkflowConfigParser parsable inference configuration file.
output_dir : str
    The directory to store result plots and files.
sections : list
    A list of subsections to use.
name : str
    The name in the [executables] section of the configuration file
    to use.
analysis_seg : {None, ligo.segments.Segment}
    The segment this job encompasses. If None then use the total analysis
    time from the workflow.
tags : {None, optional}
    Tags to add to the inference executables.

Returns
-------
pycbc.workflow.FileList
    A list of result and output files.
f16008:m1
def make_inference_summary_table(workflow, inference_file, output_dir,
                                 variable_args=None, name="<STR_LIT>",
                                 analysis_seg=None, tags=None):
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
        if analysis_seg is None else analysis_seg
    makedir(output_dir)
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, tags=tags).create_node()
    node.add_input_opt("<STR_LIT>", inference_file)
    node.new_output_file_opt(analysis_seg, "<STR_LIT>", "<STR_LIT>")
    node.add_opt("<STR_LIT>", " ".join(variable_args))
    workflow += node
    return node.output_files
Sets up the summary table of the posteriors in the workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
inference_file : pycbc.workflow.File
    The file with posterior samples.
output_dir : str
    The directory to store result plots and files.
variable_args : list
    A list of parameters to use instead of [variable_args].
name : str
    The name in the [executables] section of the configuration file
    to use.
analysis_seg : {None, ligo.segments.Segment}
    The segment this job encompasses. If None then use the total analysis
    time from the workflow.
tags : {None, optional}
    Tags to add to the inference executables.

Returns
-------
pycbc.workflow.FileList
    A list of result and output files.
f16008:m2
def make_inference_posterior_plot(
        workflow, inference_file, output_dir, parameters=None,
        name="<STR_LIT>", analysis_seg=None, tags=None):
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
        if analysis_seg is None else analysis_seg
    makedir(output_dir)
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, universe="<STR_LIT>",
                          tags=tags).create_node()
    node.add_input_opt("<STR_LIT>", inference_file)
    node.new_output_file_opt(analysis_seg, "<STR_LIT>", "<STR_LIT>")
    if parameters is not None:
        node.add_opt("<STR_LIT>", " ".join(parameters))
    workflow += node
    return node.output_files
Sets up the corner plot of the posteriors in the workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
inference_file : pycbc.workflow.File
    The file with posterior samples.
output_dir : str
    The directory to store result plots and files.
parameters : list
    A list of parameters to plot.
name : str
    The name in the [executables] section of the configuration file
    to use.
analysis_seg : {None, ligo.segments.Segment}
    The segment this job encompasses. If None then use the total analysis
    time from the workflow.
tags : {None, optional}
    Tags to add to the inference executables.

Returns
-------
pycbc.workflow.FileList
    A list of result and output files.
f16008:m3
def make_inference_acceptance_rate_plot(workflow, inference_file, output_dir,
                                        name="<STR_LIT>", analysis_seg=None,
                                        tags=None):
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
        if analysis_seg is None else analysis_seg
    makedir(output_dir)
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, tags=tags).create_node()
    node.add_input_opt("<STR_LIT>", inference_file)
    node.new_output_file_opt(analysis_seg, "<STR_LIT>", "<STR_LIT>")
    workflow += node
    return node.output_files
Sets up the acceptance rate plot in the workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
inference_file : pycbc.workflow.File
    The file with posterior samples.
output_dir : str
    The directory to store result plots and files.
name : str
    The name in the [executables] section of the configuration file
    to use.
analysis_seg : {None, ligo.segments.Segment}
    The segment this job encompasses. If None then use the total analysis
    time from the workflow.
tags : {None, optional}
    Tags to add to the inference executables.

Returns
-------
pycbc.workflow.FileList
    A list of result and output files.
f16008:m6
def make_inference_inj_plots(workflow, inference_files, output_dir,
                             parameters, name="<STR_LIT>",
                             analysis_seg=None, tags=None):
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
        if analysis_seg is None else analysis_seg
    output_files = FileList([])
    makedir(output_dir)
    for (ii, param) in enumerate(parameters):
        plot_exe = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                                  out_dir=output_dir,
                                  tags=tags + ['<STR_LIT>'.format(ii)])
        node = plot_exe.create_node()
        node.add_input_list_opt("<STR_LIT>", inference_files)
        node.new_output_file_opt(analysis_seg, "<STR_LIT>", "<STR_LIT>")
        node.add_opt("<STR_LIT>", param)
        workflow += node
        output_files += node.output_files
    return output_files
Sets up the recovered versus injected parameter plot in the workflow.

Parameters
----------
workflow : pycbc.workflow.Workflow
    The core workflow instance we are populating
inference_files : pycbc.workflow.FileList
    The files with posterior samples.
output_dir : str
    The directory to store result plots and files.
parameters : list
    A ``list`` of parameters. Each parameter gets its own plot.
name : str
    The name in the [executables] section of the configuration file
    to use.
analysis_seg : {None, ligo.segments.Segment}
    The segment this job encompasses. If None then use the total analysis
    time from the workflow.
tags : {None, optional}
    Tags to add to the inference executables.

Returns
-------
pycbc.workflow.FileList
    A list of result and output files.
f16008:m7
def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs,
                               tmplt_banks, output_dir=None,
                               injection_file=None, tags=None):
    if tags is None:
        tags = []
    logging.info("<STR_LIT>")
    make_analysis_dir(output_dir)
    cp = workflow.cp
    mfltrMethod = cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",
                                  tags)
    if mfltrMethod == "<STR_LIT>":
        logging.info("<STR_LIT>")
        if cp.has_option_tags("<STR_LIT>",
                              "<STR_LIT>", tags):
            if not cp.has_option_tags("<STR_LIT>",
                                      "<STR_LIT>", tags):
                errMsg = "<STR_LIT>"
                errMsg += "<STR_LIT>"
                logging.warn(errMsg)
            linkToTmpltbank = True
        else:
            linkToTmpltbank = False
        if cp.has_option_tags("<STR_LIT>",
                              "<STR_LIT>", tags):
            if not linkToTmpltbank:
                errMsg = "<STR_LIT>"
                errMsg += "<STR_LIT>"
                raise ValueError(errMsg)
            if not cp.has_option_tags("<STR_LIT>",
                                      "<STR_LIT>", tags):
                errMsg = "<STR_LIT>"
                errMsg += "<STR_LIT>"
                raise ValueError(errMsg)
            compatibility_mode = True
        else:
            compatibility_mode = False
        inspiral_outs = setup_matchedfltr_dax_generated(
            workflow, science_segs, datafind_outs, tmplt_banks, output_dir,
            injection_file=injection_file, tags=tags,
            link_to_tmpltbank=linkToTmpltbank,
            compatibility_mode=compatibility_mode)
    elif mfltrMethod == "<STR_LIT>":
        logging.info("<STR_LIT>")
        inspiral_outs = setup_matchedfltr_dax_generated_multi(
            workflow, science_segs, datafind_outs, tmplt_banks,
            output_dir, injection_file=injection_file, tags=tags)
    else:
        errMsg = "<STR_LIT>"
        errMsg += "<STR_LIT>"
        raise ValueError(errMsg)
    logging.info("<STR_LIT>")
    return inspiral_outs
This function aims to be the gateway for setting up a set of matched-filter jobs in a workflow. This function is intended to support multiple different ways/codes that could be used for doing this. For now the only supported sub-module is one that runs the matched-filtering by setting up a series of matched-filtering jobs, from one executable, to create matched-filter triggers covering the full range of science times for which there is data and a template bank file. Parameters ----------- workflow : pycbc.workflow.core.Workflow The workflow instance that the matched-filter jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList A FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList A FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given, the file containing the simulations to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly.
f16009:m0
def setup_matchedfltr_dax_generated(workflow, science_segs, datafind_outs,<EOL>tmplt_banks, output_dir,<EOL>injection_file=None,<EOL>tags=None, link_to_tmpltbank=False,<EOL>compatibility_mode=False):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>cp = workflow.cp<EOL>ifos = science_segs.keys()<EOL>match_fltr_exe = os.path.basename(cp.get('<STR_LIT>','<STR_LIT>'))<EOL>exe_class = select_matchedfilter_class(match_fltr_exe)<EOL>if link_to_tmpltbank:<EOL><INDENT>tmpltbank_exe = os.path.basename(cp.get('<STR_LIT>', '<STR_LIT>'))<EOL>link_exe_instance = select_tmpltbank_class(tmpltbank_exe)<EOL><DEDENT>else:<EOL><INDENT>link_exe_instance = None<EOL><DEDENT>inspiral_outs = FileList([])<EOL>for ifo in ifos:<EOL><INDENT>logging.info("<STR_LIT>" %(ifo))<EOL>job_instance = exe_class(workflow.cp, '<STR_LIT>', ifo=ifo,<EOL>out_dir=output_dir,<EOL>injection_file=injection_file,<EOL>tags=tags)<EOL>if link_exe_instance:<EOL><INDENT>link_job_instance = link_exe_instance(cp, '<STR_LIT>', ifo=ifo,<EOL>out_dir=output_dir, tags=tags)<EOL><DEDENT>else:<EOL><INDENT>link_job_instance = None<EOL><DEDENT>sngl_ifo_job_setup(workflow, ifo, inspiral_outs, job_instance,<EOL>science_segs[ifo], datafind_outs,<EOL>parents=tmplt_banks, allow_overlap=False,<EOL>link_job_instance=link_job_instance,<EOL>compatibility_mode=compatibility_mode)<EOL><DEDENT>return inspiral_outs<EOL>
Setup matched-filter jobs that are generated as part of the workflow. This module can support any matched-filter code that is similar in principle to lalapps_inspiral, but for new codes some additions are needed to define Executable and Job sub-classes (see jobutils.py). Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the matched-filter jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList A FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList A FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given, the file containing the simulations to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. link_to_tmpltbank : boolean, optional (default=False) If this option is given, the job valid_times will be altered so that there will be one inspiral file for every template bank and they will cover the same time span. Note that this option must also be given during template bank generation to be meaningful. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly.
f16009:m1
def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs,<EOL>tmplt_banks, output_dir,<EOL>injection_file=None,<EOL>tags=None, link_to_tmpltbank=False,<EOL>compatibility_mode=False):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>cp = workflow.cp<EOL>ifos = sorted(science_segs.keys())<EOL>match_fltr_exe = os.path.basename(cp.get('<STR_LIT>','<STR_LIT>'))<EOL>inspiral_outs = FileList([])<EOL>logging.info("<STR_LIT>" %('<STR_LIT:U+0020>'.join(ifos),))<EOL>if match_fltr_exe == '<STR_LIT>':<EOL><INDENT>exe_class = select_matchedfilter_class(match_fltr_exe)<EOL>cp.set('<STR_LIT>', '<STR_LIT>',str(radians(float(cp.get('<STR_LIT>', '<STR_LIT>')))))<EOL>cp.set('<STR_LIT>', '<STR_LIT>',str(radians(float(cp.get('<STR_LIT>', '<STR_LIT>')))))<EOL>job_instance = exe_class(workflow.cp, '<STR_LIT>', ifo=ifos,<EOL>out_dir=output_dir,<EOL>injection_file=injection_file,<EOL>tags=tags)<EOL>if cp.has_option("<STR_LIT>", "<STR_LIT>") and "<STR_LIT>" in tags[-<NUM_LIT:1>]:<EOL><INDENT>slide_num = int(tags[-<NUM_LIT:1>].replace("<STR_LIT>", "<STR_LIT>"))<EOL>logging.info("<STR_LIT>"<EOL>.format(slide_num))<EOL>slide_shift = int(cp.get("<STR_LIT>", "<STR_LIT>"))<EOL>time_slide_dict = {ifo: (slide_num + <NUM_LIT:1>) * ix * slide_shift<EOL>for ix, ifo in enumerate(ifos)}<EOL>multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,<EOL>science_segs, datafind_outs,<EOL>output_dir, parents=tmplt_banks,<EOL>slide_dict=time_slide_dict)<EOL><DEDENT>else:<EOL><INDENT>multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,<EOL>science_segs, datafind_outs,<EOL>output_dir, parents=tmplt_banks)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>return inspiral_outs<EOL>
Setup matched-filter jobs that are generated as part of the workflow in which a single job reads in and generates triggers over multiple ifos. This module can support any matched-filter code that is similar in principle to pycbc_multi_inspiral or lalapps_coh_PTF_inspiral, but for new codes some additions are needed to define Executable and Job sub-classes (see jobutils.py). Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the matched-filter jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList A FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList A FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given, the file containing the simulations to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly.
f16009:m2
def convert_bank_to_hdf(workflow, xmlbank, out_dir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>if len(xmlbank) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>logging.info('<STR_LIT>')<EOL>make_analysis_dir(out_dir)<EOL>bank2hdf_exe = PyCBCBank2HDFExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags)<EOL>bank2hdf_node = bank2hdf_exe.create_node(xmlbank[<NUM_LIT:0>])<EOL>workflow.add_node(bank2hdf_node)<EOL>return bank2hdf_node.output_files<EOL>
Return the template bank in hdf format
f16010:m4
def convert_trig_to_hdf(workflow, hdfbank, xml_trigger_files, out_dir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info('<STR_LIT>')<EOL>make_analysis_dir(out_dir)<EOL>trig_files = FileList()<EOL>for ifo, insp_group in zip(*xml_trigger_files.categorize_by_attr('<STR_LIT>')):<EOL><INDENT>trig2hdf_exe = PyCBCTrig2HDFExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=ifo, out_dir=out_dir, tags=tags)<EOL>_, insp_bundles = insp_group.categorize_by_attr('<STR_LIT>')<EOL>for insps in insp_bundles:<EOL><INDENT>trig2hdf_node = trig2hdf_exe.create_node(insps, hdfbank[<NUM_LIT:0>])<EOL>workflow.add_node(trig2hdf_node)<EOL>trig_files += trig2hdf_node.output_files<EOL><DEDENT><DEDENT>return trig_files<EOL>
Return the list of hdf5 trigger file outputs
f16010:m5
def setup_interval_coinc_inj(workflow, hdfbank, full_data_trig_files, inj_trig_files,<EOL>stat_files, background_file, veto_file, veto_name, out_dir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>make_analysis_dir(out_dir)<EOL>logging.info('<STR_LIT>')<EOL>if len(hdfbank) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>hdfbank = hdfbank[<NUM_LIT:0>]<EOL>if len(workflow.ifos) > <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>factor = int(workflow.cp.get_opt_tags('<STR_LIT>', '<STR_LIT>', tags))<EOL>ffiles = {}<EOL>ifiles = {}<EOL>for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('<STR_LIT>')):<EOL><INDENT>ffiles[ifo] = ffi[<NUM_LIT:0>]<EOL><DEDENT>ifos, files = inj_trig_files.categorize_by_attr('<STR_LIT>') <EOL>for ifo, ifi in zip(ifos, files):<EOL><INDENT>ifiles[ifo] = ifi[<NUM_LIT:0>]<EOL><DEDENT>ifo0, ifo1 = ifos[<NUM_LIT:0>], ifos[<NUM_LIT:1>]<EOL>combo = [(FileList([ifiles[ifo0], ifiles[ifo1]]), "<STR_LIT>"),<EOL>(FileList([ifiles[ifo0], ffiles[ifo1]]), "<STR_LIT>"),<EOL>(FileList([ifiles[ifo1], ffiles[ifo0]]), "<STR_LIT>"),<EOL>]<EOL>bg_files = {'<STR_LIT>':[], '<STR_LIT>':[], '<STR_LIT>':[]}<EOL>for trig_files, ctag in combo:<EOL><INDENT>findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=workflow.ifos,<EOL>tags=tags + [ctag], out_dir=out_dir)<EOL>for i in range(factor):<EOL><INDENT>group_str = '<STR_LIT>' % (i, factor)<EOL>coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,<EOL>stat_files,<EOL>veto_file, veto_name,<EOL>group_str,<EOL>tags=[str(i)])<EOL>bg_files[ctag] += coinc_node.output_files<EOL>workflow.add_node(coinc_node)<EOL><DEDENT><DEDENT>return setup_statmap_inj(workflow, bg_files, background_file, hdfbank, out_dir, tags=tags)<EOL>
This function sets up exact match coincidence and background estimation using a folded interval technique.
f16010:m14
def setup_interval_coinc(workflow, hdfbank, trig_files, stat_files,<EOL>veto_files, veto_names, out_dir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>make_analysis_dir(out_dir)<EOL>logging.info('<STR_LIT>')<EOL>if len(hdfbank) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % len(hdfbank))<EOL><DEDENT>hdfbank = hdfbank[<NUM_LIT:0>]<EOL>if len(workflow.ifos) > <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=workflow.ifos,<EOL>tags=tags, out_dir=out_dir)<EOL>factor = int(workflow.cp.get_opt_tags('<STR_LIT>', '<STR_LIT>', tags))<EOL>statmap_files = []<EOL>for veto_file, veto_name in zip(veto_files, veto_names):<EOL><INDENT>bg_files = FileList()<EOL>for i in range(factor):<EOL><INDENT>group_str = '<STR_LIT>' % (i, factor)<EOL>coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,<EOL>stat_files,<EOL>veto_file, veto_name,<EOL>group_str,<EOL>tags=[veto_name, str(i)])<EOL>bg_files += coinc_node.output_files<EOL>workflow.add_node(coinc_node)<EOL><DEDENT>statmap_files += [setup_statmap(workflow, bg_files, hdfbank, out_dir, tags=tags + [veto_name])]<EOL><DEDENT>logging.info('<STR_LIT>')<EOL>return statmap_files<EOL>
This function sets up exact match coincidence and background estimation using a folded interval technique.
f16010:m15
def setup_multiifo_interval_coinc_inj(workflow, hdfbank, full_data_trig_files, inj_trig_files,<EOL>stat_files, background_file, veto_file, veto_name,<EOL>out_dir, pivot_ifo, fixed_ifo, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>make_analysis_dir(out_dir)<EOL>logging.info('<STR_LIT>')<EOL>if len(hdfbank) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % len(hdfbank))<EOL><DEDENT>hdfbank = hdfbank[<NUM_LIT:0>]<EOL>factor = int(workflow.cp.get_opt_tags('<STR_LIT>', '<STR_LIT>', tags))<EOL>ffiles = {}<EOL>ifiles = {}<EOL>for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('<STR_LIT>')):<EOL><INDENT>ffiles[ifo] = ffi[<NUM_LIT:0>]<EOL><DEDENT>for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('<STR_LIT>')):<EOL><INDENT>ifiles[ifo] = ifi[<NUM_LIT:0>]<EOL><DEDENT>injinj_files = FileList()<EOL>injfull_files = FileList()<EOL>fullinj_files = FileList()<EOL>for ifo in ifiles: <EOL><INDENT>if ifo == pivot_ifo:<EOL><INDENT>injinj_files.append(ifiles[ifo])<EOL>injfull_files.append(ifiles[ifo])<EOL>fullinj_files.append(ffiles[ifo])<EOL><DEDENT>else:<EOL><INDENT>injinj_files.append(ifiles[ifo])<EOL>injfull_files.append(ffiles[ifo])<EOL>fullinj_files.append(ifiles[ifo])<EOL><DEDENT><DEDENT>combo = [(injinj_files, "<STR_LIT>"),<EOL>(injfull_files, "<STR_LIT>"),<EOL>(fullinj_files, "<STR_LIT>"),<EOL>]<EOL>bg_files = {'<STR_LIT>':[], '<STR_LIT>':[], '<STR_LIT>':[]}<EOL>for trig_files, ctag in combo:<EOL><INDENT>findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp,<EOL>'<STR_LIT>',<EOL>ifos=ifiles.keys(),<EOL>tags=tags + [ctag],<EOL>out_dir=out_dir)<EOL>for i in range(factor):<EOL><INDENT>group_str = '<STR_LIT>' % (i, factor)<EOL>coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,<EOL>stat_files,<EOL>veto_file, veto_name,<EOL>group_str,<EOL>pivot_ifo,<EOL>fixed_ifo,<EOL>tags=[veto_name, str(i)])<EOL>bg_files[ctag] += coinc_node.output_files<EOL>workflow.add_node(coinc_node)<EOL><DEDENT><DEDENT>logging.info('<STR_LIT>')<EOL>return setup_multiifo_statmap_inj(workflow, ifiles.keys(), bg_files, background_file, out_dir, tags=tags + [veto_name])<EOL>
This function sets up exact match multiifo coincidence for injections
f16010:m16
def setup_multiifo_interval_coinc(workflow, hdfbank, trig_files, stat_files,<EOL>veto_files, veto_names, out_dir, pivot_ifo, fixed_ifo, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>make_analysis_dir(out_dir)<EOL>logging.info('<STR_LIT>')<EOL>if len(hdfbank) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % len(hdfbank))<EOL><DEDENT>hdfbank = hdfbank[<NUM_LIT:0>]<EOL>ifos, _ = trig_files.categorize_by_attr('<STR_LIT>')<EOL>findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=ifos,<EOL>tags=tags, out_dir=out_dir)<EOL>factor = int(workflow.cp.get_opt_tags('<STR_LIT>', '<STR_LIT>', tags))<EOL>statmap_files = []<EOL>for veto_file, veto_name in zip(veto_files, veto_names):<EOL><INDENT>bg_files = FileList()<EOL>for i in range(factor):<EOL><INDENT>group_str = '<STR_LIT>' % (i, factor)<EOL>coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,<EOL>stat_files,<EOL>veto_file, veto_name,<EOL>group_str,<EOL>pivot_ifo,<EOL>fixed_ifo,<EOL>tags=[veto_name, str(i)])<EOL>bg_files += coinc_node.output_files<EOL>workflow.add_node(coinc_node)<EOL><DEDENT>statmap_files += [setup_multiifo_statmap(workflow, ifos, bg_files, out_dir, tags=tags + [veto_name])]<EOL><DEDENT>logging.info('<STR_LIT>')<EOL>return statmap_files<EOL>
This function sets up exact match multiifo coincidence
f16010:m17
def select_files_by_ifo_combination(ifocomb, insps):
inspcomb = FileList()<EOL>for ifo, ifile in zip(*insps.categorize_by_attr('<STR_LIT>')):<EOL><INDENT>if ifo in ifocomb:<EOL><INDENT>inspcomb += ifile<EOL><DEDENT><DEDENT>return inspcomb<EOL>
This function selects single-detector files ('insps') for a given ifo combination
f16010:m18
def get_ordered_ifo_list(ifocomb, ifo_ids):
<EOL>combination_prec = {ifo: ifo_ids[ifo] for ifo in ifocomb}<EOL>ordered_ifo_list = sorted(combination_prec, key = combination_prec.get)<EOL>pivot_ifo = ordered_ifo_list[<NUM_LIT:0>]<EOL>fixed_ifo = ordered_ifo_list[<NUM_LIT:1>]<EOL>return pivot_ifo, fixed_ifo, '<STR_LIT>'.join(ordered_ifo_list)<EOL>
This function sorts the combination of ifos (ifocomb) based on the given precedence list (ifo_ids dictionary) and returns the first ifo as pivot, the second ifo as fixed, and the ordered list joined as a string.
f16010:m19
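A minimal sketch of the ordering logic implemented above. The precedence values and the join separator here are illustrative assumptions, not taken from a real configuration:

# Illustrative precedence map; real values come from the workflow configuration.
ifo_ids = {'H1': 0, 'L1': 1, 'V1': 2}
ifocomb = ('V1', 'H1')

# Restrict the precedence map to the requested combination and sort by value.
combination_prec = {ifo: ifo_ids[ifo] for ifo in ifocomb}
ordered = sorted(combination_prec, key=combination_prec.get)

pivot_ifo, fixed_ifo = ordered[0], ordered[1]
print(pivot_ifo, fixed_ifo, ''.join(ordered))  # H1 V1 H1V1 (separator assumed)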
def setup_multiifo_combine_statmap(workflow, final_bg_file_list, out_dir, tags):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>make_analysis_dir(out_dir)<EOL>logging.info('<STR_LIT>')<EOL>cstat_exe = PyCBCMultiifoCombineStatmap(workflow.cp,<EOL>'<STR_LIT>',<EOL>ifos=workflow.ifos,<EOL>tags=tags,<EOL>out_dir=out_dir)<EOL>ifolist = '<STR_LIT:U+0020>'.join(workflow.ifos)<EOL>cluster_window = float(workflow.cp.get_opt_tags('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>tags))<EOL>combine_statmap_node = cstat_exe.create_node(final_bg_file_list,<EOL>ifolist,<EOL>cluster_window,<EOL>tags)<EOL>workflow.add_node(combine_statmap_node)<EOL>return combine_statmap_node.output_file<EOL>
Combine the multiifo statmap files into one background file
f16010:m20
def frequency_noise_from_psd(psd, seed=None):
sigma = <NUM_LIT:0.5> * (psd / psd.delta_f) ** (<NUM_LIT:0.5>)<EOL>if seed is not None:<EOL><INDENT>numpy.random.seed(seed)<EOL><DEDENT>sigma = sigma.numpy()<EOL>dtype = complex_same_precision_as(psd)<EOL>not_zero = (sigma != <NUM_LIT:0>)<EOL>sigma_red = sigma[not_zero]<EOL>noise_re = numpy.random.normal(<NUM_LIT:0>, sigma_red)<EOL>noise_co = numpy.random.normal(<NUM_LIT:0>, sigma_red)<EOL>noise_red = noise_re + <NUM_LIT> * noise_co<EOL>noise = numpy.zeros(len(sigma), dtype=dtype)<EOL>noise[not_zero] = noise_red<EOL>return FrequencySeries(noise,<EOL>delta_f=psd.delta_f,<EOL>dtype=dtype)<EOL>
Create noise with a given psd. Return noise coloured with the given psd. The returned noise FrequencySeries has the same length and frequency step as the given psd. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- psd : FrequencySeries The noise weighting to color the noise. seed : {0, int} or None The seed to generate the noise. If None is specified, the seed will not be reset. Returns -------- noise : FrequencySeries A FrequencySeries containing gaussian noise colored by the given psd.
f16012:m0
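A hedged usage sketch, assuming (as in PyCBC) that this function is exposed as pycbc.noise.frequency_noise_from_psd and that an analytic PSD is available from pycbc.psd; the lengths and cutoff below are illustrative:

import pycbc.psd
from pycbc.noise import frequency_noise_from_psd

# Analytic design-sensitivity PSD out to 1024 Hz at delta_f = 0.25 Hz.
delta_f = 0.25
flen = int(1024 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 10.0)

# One realization of frequency-domain Gaussian noise colored by the PSD.
noise = frequency_noise_from_psd(psd, seed=42)
assert len(noise) == len(psd) and noise.delta_f == psd.delta_f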
def noise_from_psd(length, delta_t, psd, seed=None):
noise_ts = TimeSeries(zeros(length), delta_t=delta_t)<EOL>if seed is None:<EOL><INDENT>seed = numpy.random.randint(<NUM_LIT:2>**<NUM_LIT:32>)<EOL><DEDENT>randomness = lal.gsl_rng("<STR_LIT>", seed)<EOL>N = int (<NUM_LIT:1.0> / delta_t / psd.delta_f)<EOL>n = N//<NUM_LIT:2>+<NUM_LIT:1><EOL>stride = N//<NUM_LIT:2><EOL>if n > len(psd):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>psd = (psd[<NUM_LIT:0>:n]).lal()<EOL>psd.data.data[n-<NUM_LIT:1>] = <NUM_LIT:0><EOL>segment = TimeSeries(zeros(N), delta_t=delta_t).lal()<EOL>length_generated = <NUM_LIT:0><EOL>SimNoise(segment, <NUM_LIT:0>, psd, randomness)<EOL>while (length_generated < length):<EOL><INDENT>if (length_generated + stride) < length:<EOL><INDENT>noise_ts.data[length_generated:length_generated+stride] = segment.data.data[<NUM_LIT:0>:stride]<EOL><DEDENT>else:<EOL><INDENT>noise_ts.data[length_generated:length] = segment.data.data[<NUM_LIT:0>:length-length_generated]<EOL><DEDENT>length_generated += stride<EOL>SimNoise(segment, stride, psd, randomness)<EOL><DEDENT>return noise_ts<EOL>
Create noise with a given psd. Return noise with a given psd. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- length : int The length of noise to generate in samples. delta_t : float The time step of the noise. psd : FrequencySeries The noise weighting to color the noise. seed : {0, int} The seed to generate the noise. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd.
f16012:m1
def noise_from_string(psd_name, length, delta_t, seed=None, low_frequency_cutoff=<NUM_LIT>):
import pycbc.psd<EOL>delta_f = <NUM_LIT:1.0> / <NUM_LIT:8><EOL>flen = int(<NUM_LIT> / delta_t / delta_f) + <NUM_LIT:1><EOL>psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)<EOL>return noise_from_psd(int(length), delta_t, psd, seed=seed)<EOL>
Create noise from an analytic PSD Return noise from the chosen PSD. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- psd_name : str Name of the analytic PSD to use. length : int The length of noise to generate in samples. delta_t : float The time step of the noise. seed : {None, int} The seed to generate the noise. low_frequency_cutoff : {10.0, float} The low frequency cutoff to pass to the PSD generation. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd.
f16012:m2
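A quick usage sketch for this sample-count variant of noise_from_string (again assuming the pycbc.noise namespace; the PSD name, duration, and cutoff are illustrative):

from pycbc.noise import noise_from_string

# 16 s of time-domain noise at 4096 Hz, colored by an analytic design curve.
delta_t = 1.0 / 4096
ts = noise_from_string('aLIGOZeroDetHighPower', 16 * 4096, delta_t,
                       seed=0, low_frequency_cutoff=10.0)
print(len(ts), ts.delta_t)  # 65536 samples, 1/4096 s each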
def block(seed):
num = SAMPLE_RATE * BLOCK_SIZE<EOL>rng = RandomState(seed % <NUM_LIT:2>**<NUM_LIT:32>)<EOL>variance = SAMPLE_RATE / <NUM_LIT:2><EOL>return rng.normal(size=num, scale=variance**<NUM_LIT:0.5>)<EOL>
Return block of normal random numbers Parameters ---------- seed : {None, int} The seed to generate the noise. Returns -------- noise : numpy.ndarray Array of random numbers
f16013:m0
def normal(start, end, seed=<NUM_LIT:0>):
<EOL>s = int(start / BLOCK_SIZE)<EOL>e = int(end / BLOCK_SIZE)<EOL>if end % BLOCK_SIZE == <NUM_LIT:0>:<EOL><INDENT>e -= <NUM_LIT:1><EOL><DEDENT>sv = RandomState(seed).randint(-<NUM_LIT:2>**<NUM_LIT:50>, <NUM_LIT:2>**<NUM_LIT:50>)<EOL>data = numpy.concatenate([block(i + sv) for i in numpy.arange(s, e + <NUM_LIT:1>, <NUM_LIT:1>)])<EOL>ts = TimeSeries(data, delta_t=<NUM_LIT:1.0> / SAMPLE_RATE, epoch=start)<EOL>return ts.time_slice(start, end)<EOL>
Generate data with a white Gaussian (normal) distribution Parameters ---------- start : int Start time in GPS seconds to generate noise end : int End time in GPS seconds to generate noise seed : {0, int} The seed to generate the noise. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise
f16013:m1
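The block construction above makes the noise stream reproducible and random-access: any requested [start, end) span is rebuilt from the same seeded blocks, so overlapping requests agree sample for sample. A self-contained miniature of that scheme, with SAMPLE_RATE and BLOCK_SIZE as illustrative stand-ins for the module constants (whose actual values are elided above):

import numpy
from numpy.random import RandomState

SAMPLE_RATE = 1024   # illustrative; the module defines its own constant
BLOCK_SIZE = 100     # seconds of noise per block, also illustrative

def block(seed):
    # Each block's samples depend only on its seed, so blocks are repeatable.
    rng = RandomState(seed % 2**32)
    return rng.normal(size=SAMPLE_RATE * BLOCK_SIZE,
                      scale=(SAMPLE_RATE / 2) ** 0.5)

def normal(start, end, seed=0):
    s, e = int(start / BLOCK_SIZE), int(end / BLOCK_SIZE)
    if end % BLOCK_SIZE == 0:
        e -= 1
    # Seed offset, mirroring the module's RandomState(seed).randint trick.
    sv = RandomState(seed).randint(-2**50, 2**50)
    data = numpy.concatenate([block(i + sv) for i in range(s, e + 1)])
    k0 = int(start * SAMPLE_RATE) - s * BLOCK_SIZE * SAMPLE_RATE
    return data[k0:k0 + int((end - start) * SAMPLE_RATE)]

# Overlapping requests regenerate identical samples in the shared span.
a = normal(150, 250)
b = normal(200, 300)
assert numpy.array_equal(a[-50 * SAMPLE_RATE:], b[:50 * SAMPLE_RATE])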
def colored_noise(psd, start_time, end_time, seed=<NUM_LIT:0>, low_frequency_cutoff=<NUM_LIT:1.0>):
psd = psd.copy()<EOL>flen = int(SAMPLE_RATE / psd.delta_f) / <NUM_LIT:2> + <NUM_LIT:1><EOL>oldlen = len(psd)<EOL>psd.resize(flen)<EOL>max_val = psd.max()<EOL>for i in xrange(len(psd)):<EOL><INDENT>if i >= (oldlen-<NUM_LIT:1>):<EOL><INDENT>psd.data[i] = psd[oldlen - <NUM_LIT:2>]<EOL><DEDENT>if psd[i] == <NUM_LIT:0>:<EOL><INDENT>psd.data[i] = max_val<EOL><DEDENT><DEDENT>wn_dur = int(end_time - start_time) + <NUM_LIT:2>*FILTER_LENGTH<EOL>if psd.delta_f >= <NUM_LIT:1.> / (<NUM_LIT>*FILTER_LENGTH):<EOL><INDENT>psd = pycbc.psd.interpolate(psd, <NUM_LIT:1.0> / (<NUM_LIT>*FILTER_LENGTH))<EOL>psd = <NUM_LIT:1.> / pycbc.psd.inverse_spectrum_truncation(<NUM_LIT:1.>/psd,<EOL>FILTER_LENGTH * SAMPLE_RATE,<EOL>low_frequency_cutoff=low_frequency_cutoff,<EOL>trunc_method='<STR_LIT>')<EOL>psd = psd.astype(complex_same_precision_as(psd))<EOL>psd = psd.to_timeseries()<EOL>psd.roll(SAMPLE_RATE * FILTER_LENGTH)<EOL>psd.resize(wn_dur * SAMPLE_RATE)<EOL>psd.roll(-SAMPLE_RATE * FILTER_LENGTH)<EOL>psd = psd.to_frequencyseries()<EOL><DEDENT>else:<EOL><INDENT>psd = pycbc.psd.interpolate(psd, <NUM_LIT:1.0> / wn_dur)<EOL>psd = <NUM_LIT:1.> / pycbc.psd.inverse_spectrum_truncation(<NUM_LIT:1.>/psd,<EOL>FILTER_LENGTH * SAMPLE_RATE,<EOL>low_frequency_cutoff=low_frequency_cutoff,<EOL>trunc_method='<STR_LIT>')<EOL><DEDENT>kmin = int(low_frequency_cutoff / psd.delta_f)<EOL>psd[:kmin].clear()<EOL>asd = (psd.real())**<NUM_LIT:0.5><EOL>del psd<EOL>white_noise = normal(start_time - FILTER_LENGTH, end_time + FILTER_LENGTH,<EOL>seed=seed)<EOL>white_noise = white_noise.to_frequencyseries()<EOL>white_noise *= asd<EOL>del asd<EOL>colored = white_noise.to_timeseries()<EOL>del white_noise<EOL>return colored.time_slice(start_time, end_time)<EOL>
Create noise from a PSD Return noise from the chosen PSD. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- psd : pycbc.types.FrequencySeries PSD to color the noise start_time : int Start time in GPS seconds to generate noise end_time : int End time in GPS seconds to generate noise seed : {0, int} The seed to generate the noise. low_frequency_cutoff : {1.0, float} The low frequency cutoff to pass to the PSD generation. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd.
f16013:m2
def noise_from_string(psd_name, start_time, end_time, seed=<NUM_LIT:0>, low_frequency_cutoff=<NUM_LIT:1.0>):
delta_f = <NUM_LIT:1.0> / FILTER_LENGTH<EOL>flen = int(SAMPLE_RATE / delta_f) / <NUM_LIT:2> + <NUM_LIT:1><EOL>psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)<EOL>return colored_noise(psd, start_time, end_time,<EOL>seed=seed,<EOL>low_frequency_cutoff=low_frequency_cutoff)<EOL>
Create noise from an analytic PSD Return noise from the chosen PSD. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- psd_name : str Name of the analytic PSD to use. start_time : int Start time in GPS seconds to generate noise end_time : int End time in GPS seconds to generate noise seed : {0, int} The seed to generate the noise. low_frequency_cutoff : {1.0, float} The low frequency cutoff to pass to the PSD generation. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd.
f16013:m3
def integral_element(mu, pdf):
dmu = mu[<NUM_LIT:1>:] - mu[:-<NUM_LIT:1>]<EOL>bin_mean = (pdf[<NUM_LIT:1>:] + pdf[:-<NUM_LIT:1>]) / <NUM_LIT><EOL>return dmu * bin_mean<EOL>
Returns an array of elements of the integrand dP = p(mu) dmu for a density p(mu) defined at sample values mu ; samples need not be equally spaced. Uses a simple trapezium rule. Number of dP elements is (number of mu samples) - 1.
f16014:m0
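A quick numerical check of the trapezium elements on a known density (a standalone sketch, not part of the module):

import numpy

mu = numpy.linspace(0.0, 1.0, 101)     # samples need not be equally spaced
pdf = 2.0 * mu                         # p(mu) = 2*mu integrates to 1 on [0, 1]

dmu = mu[1:] - mu[:-1]
dp = dmu * (pdf[1:] + pdf[:-1]) / 2.0  # the integral elements

print(dp.sum())                        # ~1.0, and len(dp) == len(mu) - 1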
def normalize_pdf(mu, pofmu):
if min(pofmu) < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if min(mu) < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>dp = integral_element(mu, pofmu)<EOL>return mu, pofmu/sum(dp)<EOL>
Takes a function pofmu defined at rate sample values mu and normalizes it to be a suitable pdf. Both mu and pofmu must be arrays or lists of the same length.
f16014:m1
def compute_upper_limit(mu_in, post, alpha=<NUM_LIT>):
if <NUM_LIT:0> < alpha < <NUM_LIT:1>:<EOL><INDENT>dp = integral_element(mu_in, post)<EOL>high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha)<EOL>mu_high = mu_in[high_idx]<EOL><DEDENT>elif alpha == <NUM_LIT:1>:<EOL><INDENT>mu_high = numpy.max(mu_in[post > <NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>return mu_high<EOL>
Returns the upper limit mu_high of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized.
f16014:m2
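A sketch of the upper-limit search on a toy exponential posterior, mirroring the cumulative-sum-plus-bisect logic of compute_upper_limit (illustrative only):

import bisect
import numpy

mu = numpy.linspace(0.0, 20.0, 2001)
post = numpy.exp(-mu)                       # unnormalized posterior on mu

dmu = mu[1:] - mu[:-1]
dp = dmu * (post[1:] + post[:-1]) / 2.0

# Smallest mu whose cumulative probability reaches 90%.
alpha = 0.9
high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha)
print(mu[high_idx])                         # ~2.30, i.e. -ln(0.1)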
def compute_lower_limit(mu_in, post, alpha=<NUM_LIT>):
if <NUM_LIT:0> < alpha < <NUM_LIT:1>:<EOL><INDENT>dp = integral_element(mu_in, post)<EOL>low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), <NUM_LIT:1> - alpha)<EOL>mu_low = mu_in[low_idx]<EOL><DEDENT>elif alpha == <NUM_LIT:1>:<EOL><INDENT>mu_low = numpy.min(mu_in[post > <NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>return mu_low<EOL>
Returns the lower limit mu_low of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized.
f16014:m3
def confidence_interval_min_width(mu, post, alpha=<NUM_LIT>):
if not <NUM_LIT:0> < alpha < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>alpha_step = <NUM_LIT><EOL>mu_low = numpy.min(mu)<EOL>mu_high = numpy.max(mu)<EOL>for ai in numpy.arange(<NUM_LIT:0>, <NUM_LIT:1> - alpha, alpha_step):<EOL><INDENT>ml = compute_lower_limit(mu, post, <NUM_LIT:1> - ai)<EOL>mh = compute_upper_limit(mu, post, alpha + ai)<EOL>if mh - ml < mu_high - mu_low:<EOL><INDENT>mu_low = ml<EOL>mu_high = mh<EOL><DEDENT><DEDENT>return mu_low, mu_high<EOL>
Returns the minimal-width confidence interval [mu_low, mu_high] of confidence level alpha for a posterior distribution post on the parameter mu.
f16014:m4
def hpd_coverage(mu, pdf, thresh):
dp = integral_element(mu, pdf)<EOL>bin_mean = (pdf[<NUM_LIT:1>:] + pdf[:-<NUM_LIT:1>]) / <NUM_LIT><EOL>return dp[bin_mean > thresh].sum()<EOL>
Integrates a pdf over mu taking only bins where the mean over the bin is above a given threshold This gives the coverage of the HPD interval for the given threshold.
f16014:m5
def hpd_threshold(mu_in, post, alpha, tol):
_, norm_post = normalize_pdf(mu_in, post)<EOL>p_minus = <NUM_LIT:0.0><EOL>p_plus = max(norm_post)<EOL>while abs(hpd_coverage(mu_in, norm_post, p_minus) -<EOL>hpd_coverage(mu_in, norm_post, p_plus)) >= tol:<EOL><INDENT>p_test = (p_minus + p_plus) / <NUM_LIT><EOL>if hpd_coverage(mu_in, norm_post, p_test) >= alpha:<EOL><INDENT>p_minus = p_test<EOL><DEDENT>else:<EOL><INDENT>p_plus = p_test<EOL><DEDENT><DEDENT>return p_minus<EOL>
For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance.
f16014:m6
def hpd_credible_interval(mu_in, post, alpha=<NUM_LIT>, tolerance=<NUM_LIT>):
if alpha == <NUM_LIT:1>:<EOL><INDENT>nonzero_samples = mu_in[post > <NUM_LIT:0>]<EOL>mu_low = numpy.min(nonzero_samples)<EOL>mu_high = numpy.max(nonzero_samples)<EOL><DEDENT>elif <NUM_LIT:0> < alpha < <NUM_LIT:1>:<EOL><INDENT>pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance)<EOL>samples_over_threshold = mu_in[post > pthresh]<EOL>mu_low = numpy.min(samples_over_threshold)<EOL>mu_high = numpy.max(samples_over_threshold)<EOL><DEDENT>return mu_low, mu_high<EOL>
Returns the minimum and maximum rate values of the HPD (Highest Posterior Density) credible interval for a posterior post defined at the sample values mu_in. Samples need not be uniformly spaced and posterior need not be normalized. Will not return a correct credible interval if the posterior is multimodal and the correct interval is not contiguous; in this case will over-cover by including the whole range from minimum to maximum mu.
f16014:m7
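The threshold bisection can be visualized on a unimodal toy case; this standalone sketch reproduces the idea with plain numpy (illustrative values throughout):

import numpy

mu = numpy.linspace(-5.0, 5.0, 2001)
post = numpy.exp(-0.5 * mu**2)        # unnormalized Gaussian posterior
post /= numpy.trapz(post, mu)         # normalize with the trapezium rule

def coverage(thresh):
    # Probability mass in bins whose mean density exceeds thresh.
    dmu = mu[1:] - mu[:-1]
    mean = (post[1:] + post[:-1]) / 2.0
    return (dmu * mean)[mean > thresh].sum()

# Bisect on the density threshold until coverage brackets 90%.
lo, hi = 0.0, post.max()
while coverage(lo) - coverage(hi) >= 1e-4:
    mid = 0.5 * (lo + hi)
    lo, hi = (mid, hi) if coverage(mid) >= 0.9 else (lo, mid)

region = mu[post > lo]
print(region.min(), region.max())     # ~(-1.645, 1.645) for a unit Gaussian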
def compute_efficiency(f_dist, m_dist, dbins):
efficiency = numpy.zeros(len(dbins) - <NUM_LIT:1>)<EOL>error = numpy.zeros(len(dbins) - <NUM_LIT:1>)<EOL>for j, dlow in enumerate(dbins[:-<NUM_LIT:1>]):<EOL><INDENT>dhigh = dbins[j + <NUM_LIT:1>]<EOL>found = numpy.sum((dlow <= f_dist) * (f_dist < dhigh))<EOL>missed = numpy.sum((dlow <= m_dist) * (m_dist < dhigh))<EOL>if found+missed == <NUM_LIT:0>:<EOL><INDENT>missed = <NUM_LIT:1.><EOL><DEDENT>efficiency[j] = float(found) / (found + missed)<EOL>error[j] = numpy.sqrt(efficiency[j] * (<NUM_LIT:1> - efficiency[j]) /<EOL>(found + missed))<EOL><DEDENT>return efficiency, error<EOL>
Compute the efficiency as a function of distance for the given sets of found and missed injection distances. Note that injections that do not fit into any dbin get lost :(
f16014:m9
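A sketch with synthetic found/missed distances (illustrative data only), showing the binomial efficiency and its error per distance bin, as in compute_efficiency:

import numpy

rng = numpy.random.RandomState(0)
f_dist = rng.uniform(0, 100, 400)      # distances of found injections
m_dist = rng.uniform(50, 200, 400)     # distances of missed injections
dbins = numpy.linspace(0, 200, 9)

for j, dlow in enumerate(dbins[:-1]):
    dhigh = dbins[j + 1]
    found = numpy.sum((dlow <= f_dist) & (f_dist < dhigh))
    missed = numpy.sum((dlow <= m_dist) & (m_dist < dhigh))
    n = max(found + missed, 1)
    eff = found / float(n)
    err = numpy.sqrt(eff * (1 - eff) / n)
    print('[%5.1f, %5.1f): eff = %.2f +/- %.2f' % (dlow, dhigh, eff, err))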
def filter_injections_by_mass(injs, mbins, bin_num, bin_type, bin_num2=None):
if bin_type == "<STR_LIT>":<EOL><INDENT>m1bins = numpy.concatenate((mbins.lower()[<NUM_LIT:0>],<EOL>numpy.array([mbins.upper()[<NUM_LIT:0>][-<NUM_LIT:1>]])))<EOL>m1lo = m1bins[bin_num]<EOL>m1hi = m1bins[bin_num + <NUM_LIT:1>]<EOL>m2bins = numpy.concatenate((mbins.lower()[<NUM_LIT:1>],<EOL>numpy.array([mbins.upper()[<NUM_LIT:1>][-<NUM_LIT:1>]])))<EOL>m2lo = m2bins[bin_num2]<EOL>m2hi = m2bins[bin_num2 + <NUM_LIT:1>]<EOL>newinjs = [l for l in injs if<EOL>((m1lo <= l.mass1 < m1hi and m2lo <= l.mass2 < m2hi) or<EOL>(m1lo <= l.mass2 < m1hi and m2lo <= l.mass1 < m2hi))]<EOL>return newinjs<EOL><DEDENT>mbins = numpy.concatenate((mbins.lower()[<NUM_LIT:0>],<EOL>numpy.array([mbins.upper()[<NUM_LIT:0>][-<NUM_LIT:1>]])))<EOL>mlow = mbins[bin_num]<EOL>mhigh = mbins[bin_num + <NUM_LIT:1>]<EOL>if bin_type == "<STR_LIT>":<EOL><INDENT>newinjs = [l for l in injs if (mlow <= l.mchirp < mhigh)]<EOL><DEDENT>elif bin_type == "<STR_LIT>":<EOL><INDENT>newinjs = [l for l in injs if (mlow <= l.mass1 + l.mass2 < mhigh)]<EOL><DEDENT>elif bin_type == "<STR_LIT>":<EOL><INDENT>newinjs = [l for l in injs if (mlow <= l.mass1 < mhigh)]<EOL><DEDENT>elif bin_type == "<STR_LIT>":<EOL><INDENT>if bin_num in [<NUM_LIT:0>, <NUM_LIT:2>]:<EOL><INDENT>newinjs = [l for l in injs if<EOL>(mlow <= l.mass1 < mhigh and mlow <= l.mass2 < mhigh)]<EOL><DEDENT>else:<EOL><INDENT>newinjs = [l for l in injs if (mbins[<NUM_LIT:0>] <= l.mass1 < mbins[<NUM_LIT:1>] and<EOL>mbins[<NUM_LIT:2>] <= l.mass2 < mbins[<NUM_LIT:3>])]<EOL>newinjs += [l for l in injs if (mbins[<NUM_LIT:0>] <= l.mass2 < mbins[<NUM_LIT:1>] and<EOL>mbins[<NUM_LIT:2>] <= l.mass1 < mbins[<NUM_LIT:3>])]<EOL><DEDENT><DEDENT>return newinjs<EOL>
For a given set of injections (sim_inspiral rows), return the subset of injections that fall within the given mass range.
f16014:m11
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None):
<EOL>volArray = bin_utils.BinnedArray(mass_bins)<EOL>vol2Array = bin_utils.BinnedArray(mass_bins)<EOL>foundArray = bin_utils.BinnedArray(mass_bins)<EOL>missedArray = bin_utils.BinnedArray(mass_bins)<EOL>effvmass = []<EOL>errvmass = []<EOL>if bin_type == "<STR_LIT>":<EOL><INDENT>for j, mc1 in enumerate(mass_bins.centres()[<NUM_LIT:0>]):<EOL><INDENT>for k, mc2 in enumerate(mass_bins.centres()[<NUM_LIT:1>]):<EOL><INDENT>newfound = filter_injections_by_mass(<EOL>found, mass_bins, j, bin_type, k)<EOL>newmissed = filter_injections_by_mass(<EOL>missed, mass_bins, j, bin_type, k)<EOL>foundArray[(mc1, mc2)] = len(newfound)<EOL>missedArray[(mc1, mc2)] = len(newmissed)<EOL>meaneff, efferr, meanvol, volerr = mean_efficiency_volume(<EOL>newfound, newmissed, dbins)<EOL>effvmass.append(meaneff)<EOL>errvmass.append(efferr)<EOL>volArray[(mc1, mc2)] = meanvol<EOL>vol2Array[(mc1, mc2)] = volerr<EOL><DEDENT><DEDENT>return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass<EOL><DEDENT>for j, mc in enumerate(mass_bins.centres()[<NUM_LIT:0>]):<EOL><INDENT>newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)<EOL>newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type)<EOL>foundArray[(mc, )] = len(newfound)<EOL>missedArray[(mc, )] = len(newmissed)<EOL>meaneff, efferr, meanvol, volerr = mean_efficiency_volume(<EOL>newfound, newmissed, dbins)<EOL>effvmass.append(meaneff)<EOL>errvmass.append(efferr)<EOL>volArray[(mc, )] = meanvol<EOL>vol2Array[(mc, )] = volerr<EOL><DEDENT>return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass<EOL>
Compute the average luminosity an experiment was sensitive to Assumes that luminosity is uniformly distributed in space. Input is the sets of found and missed injections.
f16014:m12
def get_cosmology(cosmology=None, **kwargs):
if kwargs and cosmology is not None:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):<EOL><INDENT>return cosmology<EOL><DEDENT>if kwargs:<EOL><INDENT>cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)<EOL><DEDENT>else:<EOL><INDENT>if cosmology is None:<EOL><INDENT>cosmology = DEFAULT_COSMOLOGY<EOL><DEDENT>if cosmology not in astropy.cosmology.parameters.available:<EOL><INDENT>raise ValueError("<STR_LIT>".format(cosmology))<EOL><DEDENT>cosmology = getattr(astropy.cosmology, cosmology)<EOL><DEDENT>return cosmology<EOL>
r"""Gets an astropy cosmology class. Parameters ---------- cosmology : str or astropy.cosmology.FlatLambdaCDM, optional The name of the cosmology to use. For the list of options, see :py:attr:`astropy.cosmology.parameters.available`. If None, and no other keyword arguments are provided, will default to :py:attr:`DEFAULT_COSMOLOGY`. If an instance of :py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that. \**kwargs : If any other keyword arguments are provided they will be passed to :py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom cosmology. Returns ------- astropy.cosmology.FlatLambdaCDM The cosmology to use. Examples -------- Use the default: >>> from pycbc.cosmology import get_cosmology >>> get_cosmology() FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307, Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.0486) Use properties measured by WMAP instead: >>> get_cosmology("WMAP9") FlatLambdaCDM(name="WMAP9", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K, Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.0463) Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM` for details on the default values used): >>> get_cosmology(H0=70., Om0=0.3) FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)
f16015:m0
def z_at_value(func, fval, unit, zmax=<NUM_LIT>, **kwargs):
fval, input_is_array = ensurearray(fval)<EOL>if fval.size == <NUM_LIT:1> and fval.ndim == <NUM_LIT:0>:<EOL><INDENT>fval = fval.reshape(<NUM_LIT:1>)<EOL><DEDENT>zs = numpy.zeros(fval.shape, dtype=float) <EOL>for (ii, val) in enumerate(fval):<EOL><INDENT>try:<EOL><INDENT>zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,<EOL>**kwargs)<EOL><DEDENT>except CosmologyError:<EOL><INDENT>zs[ii] = numpy.inf<EOL><DEDENT><DEDENT>replacemask = numpy.isinf(zs)<EOL>if replacemask.any():<EOL><INDENT>counter = <NUM_LIT:0> <EOL>while replacemask.any():<EOL><INDENT>kwargs['<STR_LIT>'] = zmax<EOL>zmax = <NUM_LIT:10> * zmax<EOL>idx = numpy.where(replacemask)[<NUM_LIT:0>]<EOL>for ii in idx:<EOL><INDENT>val = fval[ii]<EOL>try:<EOL><INDENT>zs[ii] = astropy.cosmology.z_at_value(<EOL>func, val*unit, zmax=zmax, **kwargs)<EOL>replacemask[ii] = False<EOL><DEDENT>except CosmologyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>counter += <NUM_LIT:1><EOL>if counter == <NUM_LIT:5>:<EOL><INDENT>logging.warning("<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(zmax))<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return formatreturn(zs, input_is_array)<EOL>
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays. Getting a z for a cosmological quantity involves numerically inverting ``func``. The ``zmax`` argument sets how large of a z to guess (see :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still is not large enough, will just return ``numpy.inf``. Parameters ---------- func : function or method A function that takes redshift as input. fval : float The value of ``func(z)``. unit : astropy.unit The unit of ``fval``. zmax : float, optional The initial maximum search limit for ``z``. Default is 1000. \**kwargs : All other keyword arguments are passed to :py:func:``astropy.cosmology.z_at_value``. Returns ------- float The redshift at the requested values.
f16015:m1
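A short sketch of the underlying astropy call that this wrapper vectorizes (astropy.cosmology.z_at_value is the real astropy entry point; the distance value is illustrative):

import astropy.cosmology
from astropy import units

cosmo = astropy.cosmology.Planck15

# Invert the luminosity-distance relation for a single value ...
z = astropy.cosmology.z_at_value(cosmo.luminosity_distance, 500 * units.Mpc)
print(z)                               # ~0.105

# ... and round-trip to confirm.
print(cosmo.luminosity_distance(z))    # ~500 Mpc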
def _redshift(distance, **kwargs):
cosmology = get_cosmology(**kwargs)<EOL>return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)<EOL>
r"""Uses astropy to get redshift from the given luminosity distance. Parameters ---------- distance : float The luminosity distance, in Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift corresponding to the given luminosity distance.
f16015:m2
def redshift(distance, **kwargs):
cosmology = get_cosmology(**kwargs)<EOL>try:<EOL><INDENT>z = _d2zs[cosmology.name](distance)<EOL><DEDENT>except KeyError:<EOL><INDENT>z = _redshift(distance, cosmology=cosmology)<EOL><DEDENT>return z<EOL>
r"""Returns the redshift associated with the given luminosity distance. If the requested cosmology is one of the pre-defined ones in :py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is used to provide a fast interpolation. This takes a few seconds to setup on the first call. Parameters ---------- distance : float The luminosity distance, in Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift corresponding to the given distance.
f16015:m3
def redshift_from_comoving_volume(vc, **kwargs):
cosmology = get_cosmology(**kwargs)<EOL>return z_at_value(cosmology.comoving_volume, vc, units.Mpc**<NUM_LIT:3>)<EOL>
r"""Returns the redshift from the given comoving volume. Parameters ---------- vc : float The comoving volume, in units of cubed Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift at the given comoving volume.
f16015:m4
def distance_from_comoving_volume(vc, **kwargs):
cosmology = get_cosmology(**kwargs)<EOL>z = redshift_from_comoving_volume(vc, cosmology=cosmology)<EOL>return cosmology.luminosity_distance(z).value<EOL>
r"""Returns the luminosity distance from the given comoving volume. Parameters ---------- vc : float The comoving volume, in units of cubed Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The luminosity distance at the given comoving volume.
f16015:m5
def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,<EOL>**kwargs):
cosmology = get_cosmology(**kwargs)<EOL>val = getattr(cosmology, quantity)(z)<EOL>if strip_unit:<EOL><INDENT>val = val.value<EOL><DEDENT>return val<EOL>
r"""Returns the value of a cosmological quantity (e.g., age) at a redshift. Parameters ---------- z : float The redshift. quantity : str The name of the quantity to get. The name may be any attribute of :py:class:`astropy.cosmology.FlatLambdaCDM`. strip_unit : bool, optional Just return the value of the quantity, sans units. Default is True. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float or astropy.units.quantity : The value of the quantity at the requested value. If ``strip_unit`` is ``True``, will return the value. Otherwise, will return the value with units.
f16015:m6
def setup_interpolant(self):
<EOL>zs = numpy.linspace(<NUM_LIT:0.>, <NUM_LIT:1.>, num=self.numpoints)<EOL>ds = self.cosmology.luminosity_distance(zs).value<EOL>self.nearby_d2z = interpolate.interp1d(ds, zs, kind='<STR_LIT>',<EOL>bounds_error=False)<EOL>zs = numpy.logspace(<NUM_LIT:0>, numpy.log10(self.default_maxz),<EOL>num=self.numpoints)<EOL>ds = self.cosmology.luminosity_distance(zs).value<EOL>self.faraway_d2z = interpolate.interp1d(ds, zs, kind='<STR_LIT>',<EOL>bounds_error=False)<EOL>self.default_maxdist = ds.max()<EOL>
Initializes the z(d) interpolation.
f16015:c0:m1
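A self-contained sketch of the two-regime inversion built here: a dense linear grid for nearby redshifts and a logarithmic grid for distant ones, each inverted with scipy's interp1d. Planck15 stands in for whatever cosmology the instance holds, and kind='linear' is an assumption (the module's actual interpolation kind is elided above):

import numpy
from astropy.cosmology import Planck15 as cosmology
from scipy import interpolate

numpoints = 10000
default_maxz = 1000.0

# Nearby branch: z in [0, 1] sampled linearly.
zs = numpy.linspace(0., 1., num=numpoints)
ds = cosmology.luminosity_distance(zs).value
nearby_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False)

# Faraway branch: z in [1, maxz] sampled logarithmically.
zs = numpy.logspace(0, numpy.log10(default_maxz), num=numpoints)
ds = cosmology.luminosity_distance(zs).value
faraway_d2z = interpolate.interp1d(ds, zs, kind='linear', bounds_error=False)

print(nearby_d2z(500.0))  # ~0.105; out-of-range queries return NaN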
def get_redshift(self, dist):
dist, input_is_array = ensurearray(dist)<EOL>try:<EOL><INDENT>zs = self.nearby_d2z(dist)<EOL><DEDENT>except TypeError:<EOL><INDENT>self.setup_interpolant()<EOL>zs = self.nearby_d2z(dist)<EOL><DEDENT>replacemask = numpy.isnan(zs)<EOL>if replacemask.any():<EOL><INDENT>zs[replacemask] = self.faraway_d2z(dist[replacemask])<EOL>replacemask = numpy.isnan(zs)<EOL><DEDENT>if replacemask.any():<EOL><INDENT>if not ((dist > <NUM_LIT:0.>).all() and numpy.isfinite(dist).all()):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>zs[replacemask] = _redshift(dist[replacemask],<EOL>cosmology=self.cosmology)<EOL><DEDENT>return formatreturn(zs, input_is_array)<EOL>
Returns the redshift for the given distance.
f16015:c0:m2
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp):
with h5py.File(fname, '<STR_LIT:r>') as bulk:<EOL><INDENT>id_bkg = bulk['<STR_LIT>'][:]<EOL>id_fg = bulk['<STR_LIT>'][:]<EOL>mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg])<EOL>bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg))<EOL>idx_bkg = np.where(bound == <NUM_LIT:1>)<EOL>mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg])<EOL>bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg))<EOL>idx_fg = np.where(bound == <NUM_LIT:1>)<EOL>zerolagstat = bulk['<STR_LIT>'][:][idx_fg]<EOL>cstat_back_exc = bulk['<STR_LIT>'][:][idx_bkg]<EOL>dec_factors = bulk['<STR_LIT>'][:][idx_bkg]<EOL><DEDENT>return {'<STR_LIT>': zerolagstat[zerolagstat > rhomin],<EOL>'<STR_LIT>': dec_factors[cstat_back_exc > rhomin],<EOL>'<STR_LIT>': cstat_back_exc[cstat_back_exc > rhomin]}<EOL>
Read the zero-lag and time-lag triggers identified by templates in a specified range of chirp mass. Parameters ---------- fname: string File that stores all the triggers rhomin: float Minimum value of SNR threshold (will need including ifar) mass1: array First mass of the waveform in the template bank mass2: array Second mass of the waveform in the template bank lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for the template Returns ------- dictionary containing foreground triggers and background information
f16017:m0
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp):
with h5py.File(fname_bank, '<STR_LIT:r>') as bulk:<EOL><INDENT>mass1_bank = bulk['<STR_LIT>'][:]<EOL>mass2_bank = bulk['<STR_LIT>'][:]<EOL>full_data = process_full_data(fname_statmap, rhomin,<EOL>mass1_bank, mass2_bank, lo_mchirp, hi_mchirp)<EOL><DEDENT>max_bg_stat = np.max(full_data['<STR_LIT>'])<EOL>bg_bins = np.linspace(rhomin, max_bg_stat, <NUM_LIT>)<EOL>bg_counts = np.histogram(full_data['<STR_LIT>'],<EOL>weights=full_data['<STR_LIT>'], bins=bg_bins)[<NUM_LIT:0>]<EOL>zerolagstat = full_data['<STR_LIT>']<EOL>coincs = zerolagstat[zerolagstat >= rhomin]<EOL>bkg = (bg_bins[:-<NUM_LIT:1>], bg_bins[<NUM_LIT:1>:], bg_counts)<EOL>return bkg, coincs<EOL>
Read the STATMAP files to derive snr falloff for the background events. Saves the output to a txt file. The bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threshold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template
f16017:m1
def log_rho_bg(trigs, bins, counts):
trigs = np.atleast_1d(trigs)<EOL>N = sum(counts)<EOL>assert np.all(trigs >= np.min(bins)),'<STR_LIT>'<EOL>if np.any(trigs >= np.max(bins)):<EOL><INDENT>N = N + <NUM_LIT:1><EOL><DEDENT>log_rhos = []<EOL>for t in trigs:<EOL><INDENT>if t >= np.max(bins):<EOL><INDENT>log_rhos.append(-log(N)-log(np.max(trigs) - bins[-<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>i = bisect.bisect(bins, t) - <NUM_LIT:1><EOL>if counts[i] == <NUM_LIT:0>:<EOL><INDENT>counts[i] = <NUM_LIT:1><EOL><DEDENT>log_rhos.append(log(counts[i]) - log(bins[i+<NUM_LIT:1>] - bins[i]) - log(N))<EOL><DEDENT><DEDENT>return np.array(log_rhos)<EOL>
Calculate the log of background fall-off Parameters ---------- trigs: array SNR values of all the triggers bins: array Bin edges for the histogrammed triggers counts: array Counts for the histogrammed triggers Returns ------- array The log of the background density evaluated at each trigger value
f16017:m2
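A standalone sketch of evaluating this histogram-based log density (synthetic background statistics, not the module's real data):

import bisect
from math import log

import numpy as np

# Synthetic background: SNR-like statistics falling off exponentially.
rng = np.random.RandomState(1)
stats = 8.0 + rng.exponential(0.5, 10000)
bins = np.linspace(8.0, np.max(stats), 76)
counts = np.histogram(stats, bins=bins)[0]

# Log density at a trigger value: bin count over bin width, per total count.
N = counts.sum()
trig = 9.0
i = bisect.bisect(bins, trig) - 1
log_rho = log(max(counts[i], 1)) - log(bins[i + 1] - bins[i]) - log(N)
print(log_rho)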
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg):
Lb = np.random.uniform(<NUM_LIT:0.>, maxfg, len(Rf))<EOL>pquit = <NUM_LIT:0><EOL>while pquit < <NUM_LIT:0.1>:<EOL><INDENT>nsamp = len(Lb)<EOL>Rf_sel = np.random.choice(Rf, nsamp)<EOL>vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel))<EOL>Lf = Rf_sel * vt<EOL>log_Lf, log_Lb = log(Lf), log(Lb)<EOL>plR = <NUM_LIT:0><EOL>for lfr in log_fg_ratios:<EOL><INDENT>plR += np.logaddexp(lfr + log_Lf, log_Lb)<EOL><DEDENT>plR -= (Lf + Lb)<EOL>plRn = plR - max(plR)<EOL>idx = np.exp(plRn) > np.random.random(len(plRn))<EOL>pquit = ss.stats.ks_2samp(Lb, Lb[idx])[<NUM_LIT:1>]<EOL>Lb = Lb[idx]<EOL><DEDENT>return Rf_sel[idx], Lf[idx], Lb<EOL>
Function to fit the likelihood Fixme
f16017:m4
def _optm(x, alpha, mu, sigma):
return ss.skewnorm.pdf(x, alpha, mu, sigma)<EOL>
Return the skew-normal probability density used when fitting the log-rate samples (the rates themselves are then skew-lognormal). See scipy.optimize.curve_fit
f16017:m5
def fit(R):
lR = np.log(R)<EOL>mu_norm, sigma_norm = np.mean(lR), np.std(lR)<EOL>xs = np.linspace(min(lR), max(lR), <NUM_LIT:200>)<EOL>kde = ss.gaussian_kde(lR)<EOL>pxs = kde(xs)<EOL>ff = optimize.curve_fit(_optm, xs, pxs, p0 = [<NUM_LIT:0.1>, mu_norm, sigma_norm])[<NUM_LIT:0>]<EOL>return ff[<NUM_LIT:0>], ff[<NUM_LIT:1>], ff[<NUM_LIT:2>]<EOL>
Fit a skew-lognormal to the rate samples obtained from a prior analysis Parameters ---------- R: array Rate samples Returns ------- ff[0]: float The skewness ff[1]: float The mean ff[2]: float The standard deviation
f16017:m6
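A sketch of the skew-normal fit on synthetic log-rate samples; scipy.stats.skewnorm, scipy.stats.gaussian_kde, and scipy.optimize.curve_fit are the real scipy entry points used above, and the shape parameters are illustrative:

import numpy as np
import scipy.stats as ss
from scipy import optimize

def _optm(x, alpha, mu, sigma):
    return ss.skewnorm.pdf(x, alpha, mu, sigma)

# Synthetic rate samples whose log is skew-normal distributed.
rng = np.random.RandomState(2)
lR = ss.skewnorm.rvs(4.0, loc=1.0, scale=0.5, size=5000, random_state=rng)

# Smooth the sample density with a KDE, then fit the skew-normal to it.
xs = np.linspace(lR.min(), lR.max(), 200)
pxs = ss.gaussian_kde(lR)(xs)
alpha, mu, sigma = optimize.curve_fit(
    _optm, xs, pxs, p0=[0.1, lR.mean(), lR.std()])[0]
print(alpha, mu, sigma)  # recovers roughly (4, 1, 0.5)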
def skew_lognormal_samples(alpha, mu, sigma, minrp, maxrp):
nsamp = <NUM_LIT><EOL>lRu = np.random.uniform(minrp, maxrp, nsamp)<EOL>plRu = ss.skewnorm.pdf(lRu, alpha, mu, sigma)<EOL>rndn = np.random.random(nsamp)<EOL>maxp = max(plRu)<EOL>idx = np.where(plRu/maxp > rndn)<EOL>log_Rf = lRu[idx]<EOL>Rfs = np.exp(log_Rf)<EOL>return Rfs<EOL>
Returns a large number of Skew lognormal samples Parameters ---------- alpha: float Skewness of the distribution mu: float Mean of the distribution sigma: float Scale of the distribution minrp: float Minimum value for the samples maxrp: float Maximum value for the samples Returns ------- Rfs: array Large number of samples (may need fixing)
f16017:m7
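The sampler above is a plain accept-reject scheme; this miniature shows the same idea with illustrative shape parameters and bounds:

import numpy as np
import scipy.stats as ss

# Accept-reject draw from a skew-normal in log-rate space.
alpha, mu, sigma = 2.0, 0.0, 1.0
lo, hi = -5.0, 5.0
u = np.random.uniform(lo, hi, 100000)
p = ss.skewnorm.pdf(u, alpha, mu, sigma)
keep = p / p.max() > np.random.random(u.size)
samples = np.exp(u[keep])  # exponentiate back to rate space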
def prob_lnm(m1, m2, s1z, s2z, **kwargs):
min_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mtotal = min_mass + max_mass<EOL>m1, m2 = np.array(m1), np.array(m2)<EOL>C_lnm = integrate.quad(lambda x: (log(max_mtotal - x) - log(min_mass))/x, min_mass, max_mass)[<NUM_LIT:0>]<EOL>xx = np.minimum(m1, m2)<EOL>m1 = np.maximum(m1, m2)<EOL>m2 = xx<EOL>bound = np.sign(max_mtotal - m1 - m2)<EOL>bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>p_m1_m2 = (<NUM_LIT:1>/C_lnm)*(<NUM_LIT:1.>/m1)*(<NUM_LIT:1.>/m2)<EOL>p_m1_m2[idx] = <NUM_LIT:0><EOL>return p_m1_m2<EOL>
Return probability density for uniform in log Parameters ---------- m1: array Component masses 1 m2: array Component masses 2 s1z: array Aligned spin 1 (not in use currently) s2z: array Aligned spin 2 (not in use currently) **kwargs: string Keyword arguments as model parameters Returns ------- p_m1_m2: array The probability density for m1, m2 pair
f16017:m8
def prob_imf(m1, m2, s1z, s2z, **kwargs):
min_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>alpha = kwargs.get('<STR_LIT>', -<NUM_LIT>)<EOL>max_mtotal = min_mass + max_mass<EOL>m1, m2 = np.array(m1), np.array(m2)<EOL>C_imf = max_mass**(alpha + <NUM_LIT:1>)/(alpha + <NUM_LIT:1>)<EOL>C_imf -= min_mass**(alpha + <NUM_LIT:1>)/(alpha + <NUM_LIT:1>)<EOL>xx = np.minimum(m1, m2)<EOL>m1 = np.maximum(m1, m2)<EOL>m2 = xx<EOL>bound = np.sign(max_mtotal - m1 - m2)<EOL>bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)<EOL>idx_out = np.where(bound != <NUM_LIT:2>)<EOL>p_m1_m2 = np.zeros_like(m1)<EOL>idx = np.where(m1 <= max_mtotal/<NUM_LIT>)<EOL>p_m1_m2[idx] = (<NUM_LIT:1.>/C_imf) * m1[idx]**alpha /(m1[idx] - min_mass)<EOL>idx = np.where(m1 > max_mtotal/<NUM_LIT>)<EOL>p_m1_m2[idx] = (<NUM_LIT:1.>/C_imf) * m1[idx]**alpha /(max_mass - m1[idx])<EOL>p_m1_m2[idx_out] = <NUM_LIT:0><EOL>return p_m1_m2/<NUM_LIT><EOL>
Return probability density for power-law Parameters ---------- m1: array Component masses 1 m2: array Component masses 2 s1z: array Aligned spin 1 (not in use currently) s2z: array Aligned spin 2 (not in use currently) **kwargs: string Keyword arguments as model parameters Returns ------- p_m1_m2: array The probability density for m1, m2 pair
f16017:m9
def prob_flat(m1, m2, s1z, s2z, **kwargs):
min_mass = kwargs.get('<STR_LIT>', <NUM_LIT:1.>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>m1, m2 = np.array(m1), np.array(m2)<EOL>bound = np.sign(m1 - m2)<EOL>bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>p_m1_m2 = <NUM_LIT> / (max_mass - min_mass)**<NUM_LIT:2> * np.ones_like(m1)<EOL>p_m1_m2[idx] = <NUM_LIT:0><EOL>return p_m1_m2<EOL>
Return probability density for uniform in component mass Parameters ---------- m1: array Component masses 1 m2: array Component masses 2 s1z: array Aligned spin 1 (not in use currently) s2z: array Aligned spin 2 (not in use currently) **kwargs: string Keyword arguments as model parameters Returns ------- p_m1_m2: array The probability density for m1, m2 pair
f16017:m10
def draw_imf_samples(**kwargs):
alpha_salpeter = kwargs.get('<STR_LIT>', -<NUM_LIT>)<EOL>nsamples = kwargs.get('<STR_LIT>', <NUM_LIT:1>)<EOL>min_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mtotal = min_mass + max_mass<EOL>a = (max_mass/min_mass)**(alpha_salpeter + <NUM_LIT:1.0>) - <NUM_LIT:1.0><EOL>beta = <NUM_LIT:1.0> / (alpha_salpeter + <NUM_LIT:1.0>)<EOL>k = nsamples * int(<NUM_LIT> + log(<NUM_LIT:1> + <NUM_LIT>/nsamples))<EOL>aa = min_mass * (<NUM_LIT:1.0> + a * np.random.random(k))**beta<EOL>bb = np.random.uniform(min_mass, aa, k)<EOL>idx = np.where(aa + bb < max_mtotal)<EOL>m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]<EOL>return np.resize(m1, nsamples), np.resize(m2, nsamples)<EOL>
Draw samples for power-law model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
f16017:m11
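The primary mass above is drawn by inverse-transform sampling of the power law. A sketch verifying that transform with illustrative parameters (alpha = -2.35, masses in solar masses):

import numpy as np

alpha, min_mass, max_mass = -2.35, 5.0, 95.0

# Inverse CDF of p(m) ~ m**alpha on [min_mass, max_mass]:
#   m = min_mass * (1 + a*u)**(1/(alpha+1)),  u ~ Uniform(0, 1)
a = (max_mass / min_mass) ** (alpha + 1.0) - 1.0
beta = 1.0 / (alpha + 1.0)
u = np.random.RandomState(3).random_sample(200000)
m = min_mass * (1.0 + a * u) ** beta

# Check: the fraction below 10 Msun should match the analytic CDF there.
cdf10 = (10.0**(alpha + 1) - min_mass**(alpha + 1)) / \
        (max_mass**(alpha + 1) - min_mass**(alpha + 1))
print(np.mean(m < 10.0), cdf10)  # both ~0.62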
def draw_lnm_samples(**kwargs):
<EOL>nsamples = kwargs.get('<STR_LIT>', <NUM_LIT:1>)<EOL>min_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>max_mtotal = min_mass + max_mass<EOL>lnmmin = log(min_mass)<EOL>lnmmax = log(max_mass)<EOL>k = nsamples * int(<NUM_LIT> + log(<NUM_LIT:1> + <NUM_LIT>/nsamples))<EOL>aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))<EOL>bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))<EOL>idx = np.where(aa + bb < max_mtotal)<EOL>m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]<EOL>return np.resize(m1, nsamples), np.resize(m2, nsamples)<EOL>
Draw samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
f16017:m12
def draw_flat_samples(**kwargs):
<EOL>nsamples = kwargs.get('<STR_LIT>', <NUM_LIT:1>)<EOL>min_mass = kwargs.get('<STR_LIT>', <NUM_LIT:1.>)<EOL>max_mass = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>m1 = np.random.uniform(min_mass, max_mass, nsamples)<EOL>m2 = np.random.uniform(min_mass, max_mass, nsamples)<EOL>return np.maximum(m1, m2), np.minimum(m1, m2)<EOL>
Draw samples for uniform in mass Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
f16017:m13
def mchirp_sampler_lnm(**kwargs):
m1, m2 = draw_lnm_samples(**kwargs)<EOL>mchirp_astro = mchirp_from_mass1_mass2(m1, m2)<EOL>return mchirp_astro<EOL>
Draw chirp mass samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp_astro: array The chirp mass samples for the population
f16017:m14
def mchirp_sampler_imf(**kwargs):
m1, m2 = draw_imf_samples(**kwargs)<EOL>mchirp_astro = mchirp_from_mass1_mass2(m1, m2)<EOL>return mchirp_astro<EOL>
Draw chirp mass samples for power-law model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp_astro: array The chirp mass samples for the population
f16017:m15
def mchirp_sampler_flat(**kwargs):
m1, m2 = draw_flat_samples(**kwargs)<EOL>mchirp_astro = mchirp_from_mass1_mass2(m1, m2)<EOL>return mchirp_astro<EOL>
Draw chirp mass samples for the flat-in-mass model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp_astro: array The chirp mass samples for the population
f16017:m16
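All three samplers reduce to drawing (m1, m2) and converting to chirp mass; `mchirp_from_mass1_mass2` comes from pycbc.conversions and evaluates the standard combination (m1*m2)**(3/5) / (m1+m2)**(1/5). A self-contained sketch with the flat-sampler logic inlined and illustrative bounds (the real defaults are masked):

import numpy as np

def mchirp(m1, m2):
    # Standard chirp mass: (m1*m2)**(3/5) / (m1+m2)**(1/5)
    return (m1 * m2)**0.6 / (m1 + m2)**0.2

# Flat-in-component-mass draws between assumed bounds of 1 and 3:
m1 = np.random.uniform(1.0, 3.0, 10000)
m2 = np.random.uniform(1.0, 3.0, 10000)
m1, m2 = np.maximum(m1, m2), np.minimum(m1, m2)   # enforce m1 >= m2
print(mchirp(m1, m2).mean())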
def read_injections(sim_files, m_dist, s_dist, d_dist):
injections = {}<EOL>min_d, max_d = <NUM_LIT>, <NUM_LIT:0><EOL>nf = len(sim_files)<EOL>for i in range(nf):<EOL><INDENT>key = str(i)<EOL>injections[key] = process_injections(sim_files[i])<EOL>injections[key]['<STR_LIT>'] = sim_files[i]<EOL>injections[key]['<STR_LIT>'] = m_dist[i]<EOL>injections[key]['<STR_LIT>'] = s_dist[i]<EOL>injections[key]['<STR_LIT>'] = d_dist[i]<EOL>mass1, mass2 = injections[key]['<STR_LIT>'], injections[key]['<STR_LIT>']<EOL>distance = injections[key]['<STR_LIT>']<EOL>mchirp = m1m2tomch(mass1, mass2)<EOL>injections[key]['<STR_LIT>'] = mchirp<EOL>injections[key]['<STR_LIT>'] = mass1 + mass2<EOL>injections[key]['<STR_LIT>'] = [min(mass1 + mass2), max(mass1 + mass2)]<EOL>injections[key]['<STR_LIT>'] = [min(mass1), max(mass1)]<EOL>injections[key]['<STR_LIT>'] = [min(mass2), max(mass2)]<EOL>injections[key]['<STR_LIT>'] = [min(distance), max(distance)]<EOL>min_d, max_d = min(min_d, min(distance)), max(max_d, max(distance))<EOL><DEDENT>injections['<STR_LIT>'] = [dlum_to_z(min_d), dlum_to_z(max_d)]<EOL>return injections<EOL>
Read all the injections from the files in the provided folder. The files must each belong to an individual injection set, i.e. no file may combine all the injections in a run. Identifies injection strategies and finds parameter boundaries. Collects injections according to GPS time. Parameters ---------- sim_files: list List containing names of the simulation files m_dist: list The mass distribution used in the simulation runs s_dist: list The spin distribution used in the simulation runs d_dist: list The distance distribution used in the simulation runs Returns ------- injections: dictionary Contains the organized information about the injections
f16018:m0
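A hypothetical call, for orientation only. The strategy strings are masked in the body above, so the labels below ('totalMass', 'dchirp', and so on) follow the conventions of PyCBC's population code and should be treated as assumptions, as should the file names and the availability of this module for import.

# Hypothetical file names and per-file strategy labels:
sim_files = ['inj_set1.hdf', 'inj_set2.hdf', 'inj_set3.hdf']
m_dist = ['totalMass', 'componentMass', 'log']      # mass strategy per file
s_dist = ['aligned', 'precessing', 'disable_spin']  # spin strategy per file
d_dist = ['uniform', 'dchirp', 'uniform']           # distance strategy per file
injections = read_injections(sim_files, m_dist, s_dist, d_dist)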
def estimate_vt(injections, mchirp_sampler, model_pdf, **kwargs):
thr_var = kwargs.get('<STR_LIT>')<EOL>thr_val = kwargs.get('<STR_LIT>')<EOL>nsamples = <NUM_LIT> <EOL>injections = copy.deepcopy(injections)<EOL>min_z, max_z = injections['<STR_LIT>']<EOL>V = quad(contracted_dVdc, <NUM_LIT:0.>, max_z)[<NUM_LIT:0>]<EOL>z_astro = astro_redshifts(min_z, max_z, nsamples)<EOL>astro_lum_dist = cosmo.luminosity_distance(z_astro).value<EOL>mch_astro = np.array(mchirp_sampler(nsamples = nsamples, **kwargs))<EOL>mch_astro_det = mch_astro * (<NUM_LIT:1.> + z_astro)<EOL>idx_within = np.zeros(nsamples)<EOL>for key in injections.keys():<EOL><INDENT>if key == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>mchirp = injections[key]['<STR_LIT>']<EOL>min_mchirp, max_mchirp = min(mchirp), max(mchirp)<EOL>distance = injections[key]['<STR_LIT>']<EOL>if injections[key]['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>d_min, d_max = min(distance), max(distance)<EOL><DEDENT>elif injections[key]['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>d_fid_min = min(distance / (mchirp/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>))<EOL>d_fid_max = max(distance / (mchirp/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>))<EOL>d_min = d_fid_min * (mch_astro_det/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>)<EOL>d_max = d_fid_max * (mch_astro_det/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>)<EOL><DEDENT>bound = np.sign((max_mchirp-mch_astro_det)*(mch_astro_det-min_mchirp))<EOL>bound += np.sign((d_max - astro_lum_dist)*(astro_lum_dist - d_min))<EOL>idx = np.where(bound == <NUM_LIT:2>)<EOL>idx_within[idx] = <NUM_LIT:1><EOL><DEDENT>inj_V0 = <NUM_LIT:4>*np.pi*V*len(idx_within[idx_within == <NUM_LIT:1>])/float(nsamples)<EOL>injections['<STR_LIT>'] = inj_V0<EOL>z_range = injections['<STR_LIT>']<EOL>V_min = quad(contracted_dVdc, <NUM_LIT:0.>, z_range[<NUM_LIT:0>])[<NUM_LIT:0>]<EOL>V_max = quad(contracted_dVdc, <NUM_LIT:0.>, z_range[<NUM_LIT:1>])[<NUM_LIT:0>]<EOL>thr_falloff, i_inj, i_det, i_det_sq = [], <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>gps_min, gps_max = <NUM_LIT>, <NUM_LIT:0><EOL>keys = injections.keys()<EOL>for key in keys:<EOL><INDENT>if key == '<STR_LIT>' or key == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>data = injections[key]<EOL>distance = data['<STR_LIT>']<EOL>mass1, mass2 = data['<STR_LIT>'], data['<STR_LIT>']<EOL>spin1z, spin2z = data['<STR_LIT>'], data['<STR_LIT>']<EOL>mchirp = data['<STR_LIT>']<EOL>gps_min = min(gps_min, min(data['<STR_LIT>']))<EOL>gps_max = max(gps_max, max(data['<STR_LIT>']))<EOL>z_inj = dlum_to_z(distance)<EOL>m1_sc, m2_sc = mass1/(<NUM_LIT:1> + z_inj), mass2/(<NUM_LIT:1> + z_inj)<EOL>p_out = model_pdf(m1_sc, m2_sc, spin1z, spin2z)<EOL>p_out *= pdf_z_astro(z_inj, V_min, V_max)<EOL>p_in = <NUM_LIT:0><EOL>J = cosmo.luminosity_distance(z_inj + <NUM_LIT>).value<EOL>J -= cosmo.luminosity_distance(z_inj - <NUM_LIT>).value<EOL>J = abs(J)/<NUM_LIT> <EOL>for key2 in keys:<EOL><INDENT>if key2 == '<STR_LIT>' or key2 == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>dt_j = injections[key2]<EOL>dist_j = dt_j['<STR_LIT>']<EOL>m1_j, m2_j = dt_j['<STR_LIT>'], dt_j['<STR_LIT>']<EOL>s1x_2, s2x_2 = dt_j['<STR_LIT>'], dt_j['<STR_LIT>']<EOL>s1y_2, s2y_2 = dt_j['<STR_LIT>'], dt_j['<STR_LIT>']<EOL>s1z_2, s2z_2 = dt_j['<STR_LIT>'], dt_j['<STR_LIT>']<EOL>s1 = np.sqrt(s1x_2**<NUM_LIT:2> + s1y_2**<NUM_LIT:2> + s1z_2**<NUM_LIT:2>)<EOL>s2 = np.sqrt(s2x_2**<NUM_LIT:2> + s2y_2**<NUM_LIT:2> + s2z_2**<NUM_LIT:2>)<EOL>mch_j = dt_j['<STR_LIT>']<EOL>if dt_j['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>lomass, himass = min(min(m1_j), min(m2_j)), max(max(m1_j), max(m2_j))<EOL>lomass_2, himass_2 = lomass, himass<EOL><DEDENT>elif dt_j['<STR_LIT>'] == '<STR_LIT>' or dt_j['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>lomass, himass = min(m1_j), max(m1_j)<EOL>lomass_2, himass_2 = min(m2_j), max(m2_j)<EOL><DEDENT>if dt_j['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>l_dist = min(dist_j / (mch_j/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>))<EOL>h_dist = max(dist_j / (mch_j/_mch_BNS)**(<NUM_LIT:5>/<NUM_LIT>))<EOL><DEDENT>elif dt_j['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>l_dist, h_dist = min(dist_j), max(dist_j)<EOL><DEDENT>mdist = dt_j['<STR_LIT>']<EOL>prob_mass = inj_mass_pdf(mdist, mass1, mass2,<EOL>lomass, himass, lomass_2, himass_2)<EOL>ddist = dt_j['<STR_LIT>']<EOL>prob_dist = inj_distance_pdf(ddist, distance, l_dist,<EOL>h_dist, mchirp)<EOL>hspin1, hspin2 = max(s1), max(s2)<EOL>prob_spin = inj_spin_pdf(dt_j['<STR_LIT>'], hspin1, spin1z)<EOL>prob_spin *= inj_spin_pdf(dt_j['<STR_LIT>'], hspin2, spin2z)<EOL>p_in += prob_mass * prob_dist * prob_spin * J * (<NUM_LIT:1> + z_inj)**<NUM_LIT:2><EOL><DEDENT>p_in[p_in == <NUM_LIT:0>] = <NUM_LIT><EOL>p_out_in = p_out/p_in<EOL>i_inj += np.sum(p_out_in)<EOL>i_det += np.sum((p_out_in)[data[thr_var] > thr_val])<EOL>i_det_sq += np.sum((p_out_in)[data[thr_var] > thr_val]**<NUM_LIT:2>)<EOL>idx_thr = np.where(data[thr_var] > thr_val)<EOL>thrs = data[thr_var][idx_thr]<EOL>ratios = p_out_in[idx_thr]/max(p_out_in[idx_thr])<EOL>rndn = np.random.uniform(<NUM_LIT:0>, <NUM_LIT:1>, len(ratios))<EOL>idx_ratio = np.where(ratios > rndn)<EOL>thr_falloff.append(thrs[idx_ratio])<EOL><DEDENT>inj_V0 = injections['<STR_LIT>']<EOL>injections['<STR_LIT>'] = i_inj<EOL>injections['<STR_LIT>'] = i_det<EOL>injections['<STR_LIT>'] = i_det_sq<EOL>injections['<STR_LIT>'] = ((inj_V0*i_det/i_inj) * (gps_max - gps_min)/<NUM_LIT>)<EOL>injections['<STR_LIT>'] = injections['<STR_LIT>'] * np.sqrt(i_det_sq)/i_det<EOL>injections['<STR_LIT>'] = np.hstack(np.array(thr_falloff).flat)<EOL>
Based on the injection strategy and the desired astro model, estimate the injected volume. Scale injections and estimate the sensitive volume. Parameters ---------- injections: dictionary Dictionary obtained after reading injections from read_injections mchirp_sampler: function Sampler for producing chirp mass samples for the astro model. model_pdf: function The PDF for the astro model in mass1-mass2-spin1z-spin2z space. This is easily extensible to include precession kwargs: keyword arguments Inputs for thresholds and astrophysical models Returns ------- injections: dictionary The input dictionary with VT and VT error included with the injections
f16018:m1
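The core of this function is importance sampling: each injection drawn from the injection distribution p_in is reweighted by w = p_out/p_in, the sensitive volume scales as V0 times the sum of w over found injections divided by the sum over all injections, and the i_det_sq accumulator gives the usual Monte Carlo relative error sqrt(sum of w_det**2) / (sum of w_det). A toy one-dimensional illustration of the same bookkeeping:

import numpy as np

# Injections were drawn from p_inj; we want the detection efficiency
# under p_astro.  Here p_inj is uniform on [0, 2], p_astro is uniform
# on [0, 1], and "detected" is a stand-in threshold x < 0.3, so the
# weighted efficiency should come out near 0.3.
rng = np.random.default_rng(42)
x = rng.uniform(0.0, 2.0, 100000)
p_inj = np.full_like(x, 0.5)
p_astro = np.where(x < 1.0, 1.0, 0.0)
w = p_astro / p_inj
detected = x < 0.3
eff = w[detected].sum() / w.sum()
# Relative Monte Carlo error, as in the i_det_sq accumulator above:
err = np.sqrt((w[detected]**2).sum()) / w[detected].sum()
print(eff, err)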
def process_injections(hdffile):
data = {}<EOL>with h5py.File(hdffile, '<STR_LIT:r>') as inp:<EOL><INDENT>found_index = inp['<STR_LIT>'][:]<EOL>for param in _save_params:<EOL><INDENT>data[param] = inp['<STR_LIT>'+param][:]<EOL><DEDENT>ifar = np.zeros_like(data[_save_params[<NUM_LIT:0>]])<EOL>ifar[found_index] = inp['<STR_LIT>'][:]<EOL>data['<STR_LIT>'] = ifar<EOL>stat = np.zeros_like(data[_save_params[<NUM_LIT:0>]])<EOL>stat[found_index] = inp['<STR_LIT>'][:]<EOL>data['<STR_LIT>'] = stat<EOL><DEDENT>return data<EOL>
Read in an injection file and extract the found injections along with all injections Parameters ---------- hdffile: hdf file File for which injections are to be processed Returns ------- data: dictionary Dictionary containing the injections read from the input file
f16018:m2
def dlum_to_z(dl):
return _dlum_interp(dl)<EOL>
Get the redshift for a luminosity distance Parameters ---------- dl: array The array of luminosity distances Returns ------- array The redshift values corresponding to the luminosity distances
f16018:m3
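_dlum_interp is presumably an interpolant built once at module load by tabulating the luminosity distance and inverting it. A sketch of how such an inverse map can be constructed; the Planck15 cosmology and the grid limits are assumptions:

import numpy as np
from astropy.cosmology import Planck15 as cosmo
from scipy.interpolate import interp1d

# Tabulate D_L(z), which is monotonic, and interpolate the inverse.
z_grid = np.linspace(1e-4, 2.0, 2000)
dl_grid = cosmo.luminosity_distance(z_grid).value   # Mpc
dlum_to_z_sketch = interp1d(dl_grid, z_grid)

print(float(dlum_to_z_sketch(400.0)))   # redshift at D_L = 400 Mpc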
def astro_redshifts(min_z, max_z, nsamples):
dz, fac = <NUM_LIT>, <NUM_LIT><EOL>V = quad(contracted_dVdc, <NUM_LIT:0.>, max_z)[<NUM_LIT:0>]<EOL>zbins = np.arange(min_z, max_z + dz/<NUM_LIT>, dz)<EOL>zcenter = (zbins[:-<NUM_LIT:1>] + zbins[<NUM_LIT:1>:]) / <NUM_LIT:2><EOL>pdfz = cosmo.differential_comoving_volume(zcenter).value/(<NUM_LIT:1>+zcenter)/V<EOL>int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=<NUM_LIT:0>)<EOL>rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))<EOL>pdf_zs = int_pdf(rndz)<EOL>maxpdf = max(pdf_zs)<EOL>rndn = np.random.uniform(<NUM_LIT:0>, <NUM_LIT:1>, int(fac*nsamples)) * maxpdf<EOL>diff = pdf_zs - rndn<EOL>idx = np.where(diff > <NUM_LIT:0>)<EOL>z_astro = rndz[idx]<EOL>np.random.shuffle(z_astro)<EOL>z_astro.resize(nsamples)<EOL>return z_astro<EOL>
Sample redshifts for sources, assuming a redshift-independent rate, using standard cosmology Parameters ---------- min_z: float Minimum redshift max_z: float Maximum redshift nsamples: int Number of samples Returns ------- z_astro: array nsamples of redshift, between min_z, max_z, by standard cosmology
f16018:m4
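The body implements rejection sampling against p(z) proportional to dVc/dz / (1+z): candidates are drawn uniformly in z and each is kept with probability proportional to the target density. A compact sketch; the Planck15 cosmology and the 10x oversampling factor are assumptions (the masked factor in the body plays the same role), and the slice assumes enough candidates survive:

import numpy as np
from astropy.cosmology import Planck15 as cosmo

def sample_z(min_z, max_z, n, oversample=10):
    # Draw candidates uniformly, then accept in proportion to the
    # (unnormalised) rate density dVc/dz / (1 + z).
    z = np.random.uniform(min_z, max_z, oversample * n)
    pdf = cosmo.differential_comoving_volume(z).value / (1.0 + z)
    keep = np.random.uniform(0.0, pdf.max(), z.size) < pdf
    z = z[keep]
    np.random.shuffle(z)
    return z[:n]

print(sample_z(0.0, 1.0, 5))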
def pdf_z_astro(z, V_min, V_max):
return contracted_dVdc(z)/(V_max - V_min)<EOL>
Get the probability density for the rate of events at a redshift assuming standard cosmology
f16018:m5
def inj_mass_pdf(key, mass1, mass2, lomass, himass, lomass_2 = <NUM_LIT:0>, himass_2 = <NUM_LIT:0>):
mass1, mass2 = np.array(mass1), np.array(mass2)<EOL>if key == '<STR_LIT>':<EOL><INDENT>bound = np.sign((lomass + himass) - (mass1 + mass2))<EOL>bound += np.sign((himass - mass1)*(mass1 - lomass))<EOL>bound += np.sign((himass - mass2)*(mass2 - lomass))<EOL>idx = np.where(bound != <NUM_LIT:3>)<EOL>pdf = <NUM_LIT:1.>/(himass - lomass)/(mass1 + mass2 - <NUM_LIT:2> * lomass)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>if key == '<STR_LIT>':<EOL><INDENT>bound = np.sign((himass - mass1)*(mass1 - lomass))<EOL>bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2))<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>pdf = np.ones_like(mass1) / (himass - lomass) / (himass_2 - lomass_2)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>if key == '<STR_LIT>':<EOL><INDENT>bound = np.sign((himass - mass1)*(mass1 - lomass))<EOL>bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2))<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>pdf = <NUM_LIT:1> / (log(himass) - log(lomass)) / (log(himass_2) - log(lomass_2))<EOL>pdf /= (mass1 * mass2)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>
Estimate the probability density based on the injection strategy Parameters ---------- key: string Injection strategy mass1: array First mass of the injections mass2: array Second mass of the injections lomass: float Lower value of the mass distribution himass: float Higher value of the mass distribution lomass_2: {0, float}, optional Lower value of the second mass distribution (used by component-mass strategies) himass_2: {0, float}, optional Higher value of the second mass distribution (used by component-mass strategies) Returns ------- pdf: array Probability density of the injections
f16018:m7
def inj_spin_pdf(key, high_spin, spinz):
<EOL>if spinz[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>return np.ones_like(spinz)<EOL><DEDENT>spinz = np.array(spinz)<EOL>bound = np.sign(np.absolute(high_spin) - np.absolute(spinz))<EOL>bound += np.sign(<NUM_LIT:1> - np.absolute(spinz))<EOL>if key == '<STR_LIT>':<EOL><INDENT>pdf = ((np.log(high_spin) - np.log(abs(spinz)))/high_spin/<NUM_LIT:2>)<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>if key == '<STR_LIT>':<EOL><INDENT>pdf = (np.ones_like(spinz) / <NUM_LIT:2> / high_spin)<EOL>idx = np.where(bound != <NUM_LIT:2>)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>if key == '<STR_LIT>':<EOL><INDENT>pdf = np.ones_like(spinz)<EOL>return pdf<EOL><DEDENT>
Estimate the probability density of the injections for the spin distribution. Parameters ---------- key: string Injections strategy high_spin: float Maximum spin used in the strategy spinz: array Spin of the injections (for one component) Returns ------- pdf: array Probability density of the component spin
f16018:m8
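The first branch (presumably the isotropic, precessing-spin strategy; the key string is masked) is the marginal density of the aligned component when the spin magnitude is uniform on [0, high_spin] and the orientation is isotropic. Given magnitude s, the component s_z is uniform on [-s, s], so with a = high_spin:

p(s_z) = \int_{|s_z|}^{a} \frac{1}{a} \cdot \frac{1}{2s} \, \mathrm{d}s = \frac{1}{2a} \ln\frac{a}{|s_z|}

This is what the corrected pdf line evaluates: it diverges integrably at s_z = 0 and vanishes at |s_z| = a.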
def inj_distance_pdf(key, distance, low_dist, high_dist, mchirp = <NUM_LIT:1>):
distance = np.array(distance)<EOL>if key == '<STR_LIT>':<EOL><INDENT>pdf = np.ones_like(distance)/(high_dist - low_dist)<EOL>bound = np.sign((high_dist - distance)*(distance - low_dist))<EOL>idx = np.where(bound != <NUM_LIT:1>)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>if key == '<STR_LIT>':<EOL><INDENT>weight = (mchirp/_mch_BNS)**(<NUM_LIT>/<NUM_LIT:6>)<EOL>pdf = np.ones_like(distance) / weight / (high_dist - low_dist)<EOL>bound = np.sign((weight*high_dist - distance)*(distance - weight*low_dist))<EOL>idx = np.where(bound != <NUM_LIT:1>)<EOL>pdf[idx] = <NUM_LIT:0><EOL>return pdf<EOL><DEDENT>
Estimate the probability density of the injections for the distance distribution. Parameters ---------- key: string Injections strategy distance: array Array of distances low_dist: float Lower value of distance used in the injection strategy high_dist: float Higher value of distance used in the injection strategy mchirp: {1, float}, optional Chirp mass of the injections, needed for the chirp-distance strategy Returns ------- pdf: array Probability density of the injection distances
f16018:m9
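The second branch rescales by a power of chirp mass, which is the chirp-distance convention: injections placed uniformly in chirp distance put heavier (intrinsically louder) systems proportionally farther away. A sketch of the conversion, where _mch_BNS is presumably the chirp mass of a fiducial 1.4+1.4 solar-mass binary:

import numpy as np

MCH_BNS = 1.4 * 2**-0.2   # ~1.219, chirp mass of an equal-mass 1.4+1.4 binary

def chirp_distance(dist, mchirp):
    # d_chirp = d * (MCH_BNS / mchirp)**(5/6): at leading PN order a
    # source at fixed d_chirp has roughly mass-independent expected SNR.
    return np.asarray(dist) * (MCH_BNS / np.asarray(mchirp))**(5.0 / 6.0)

print(chirp_distance(100.0, 30.0))   # heavy system: small chirp distance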
def insert_processing_option_group(parser):
processing_group = parser.add_argument_group("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>processing_group.add_argument("<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>" + str(list(set(scheme_prefix.values()))) +<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>default="<STR_LIT>")<EOL>processing_group.add_argument("<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>default=<NUM_LIT:0>, type=int)<EOL>
Adds the options used to choose a processing scheme. This should be used if your program supports the ability to select the processing scheme. Parameters ---------- parser : object OptionParser instance
f16019:m5
def from_cli(opt):
scheme_str = opt.processing_scheme.split('<STR_LIT::>')<EOL>name = scheme_str[<NUM_LIT:0>]<EOL>if name == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>ctx = CUDAScheme(opt.processing_device_id)<EOL><DEDENT>elif name == "<STR_LIT>":<EOL><INDENT>if len(scheme_str) > <NUM_LIT:1>:<EOL><INDENT>numt = scheme_str[<NUM_LIT:1>]<EOL>if numt.isdigit():<EOL><INDENT>numt = int(numt)<EOL><DEDENT>ctx = MKLScheme(num_threads=numt)<EOL><DEDENT>else:<EOL><INDENT>ctx = MKLScheme()<EOL><DEDENT>logging.info("<STR_LIT>" % ctx.num_threads)<EOL><DEDENT>else:<EOL><INDENT>if len(scheme_str) > <NUM_LIT:1>:<EOL><INDENT>numt = scheme_str[<NUM_LIT:1>]<EOL>if numt.isdigit():<EOL><INDENT>numt = int(numt)<EOL><DEDENT>ctx = CPUScheme(num_threads=numt)<EOL><DEDENT>else:<EOL><INDENT>ctx = CPUScheme()<EOL><DEDENT>logging.info("<STR_LIT>" % ctx.num_threads)<EOL><DEDENT>return ctx<EOL>
Parses the command line options and returns a processing scheme. Parameters ---------- opt: object Result of parsing the CLI with OptionParser, or any object with the required attributes. Returns ------- ctx: Scheme Returns the requested processing scheme.
f16019:m6
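A usage sketch. The literal option strings are masked in the bodies above, so the flag name --processing-scheme and the scheme:threads form ('cpu:4') follow PyCBC's documented conventions and should be treated as assumptions:

import argparse
from pycbc import scheme

parser = argparse.ArgumentParser()
scheme.insert_processing_option_group(parser)
opt = parser.parse_args(['--processing-scheme', 'cpu:4'])
scheme.verify_processing_options(opt, parser)
ctx = scheme.from_cli(opt)
with ctx:
    pass   # operations here run under the selected scheme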
def verify_processing_options(opt, parser):
scheme_types = scheme_prefix.values()<EOL>if opt.processing_scheme.split('<STR_LIT::>')[<NUM_LIT:0>] not in scheme_types:<EOL><INDENT>parser.error("<STR_LIT>")<EOL><DEDENT>
Parses the processing scheme options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance.
f16019:m7
def complex_median(complex_list):
median_real = numpy.median([complex_number.real<EOL>for complex_number in complex_list])<EOL>median_imag = numpy.median([complex_number.imag<EOL>for complex_number in complex_list])<EOL>return median_real + <NUM_LIT>*median_imag<EOL>
Get the median value of a list of complex numbers. Parameters ---------- complex_list: list List of complex numbers to calculate the median. Returns ------- a + 1.j*b: complex number The complex number whose real and imaginary parts are the medians of the real and imaginary parts of the input.
f16021:m0
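Because the real and imaginary medians are taken independently, the result need not equal any input element. A minimal numeric check of the same semantics:

import numpy
vals = numpy.array([1 + 2j, 3 + 0j, 2 + 5j])
med = numpy.median(vals.real) + 1j * numpy.median(vals.imag)
print(med)   # (2+2j), although no input element equals 2+2j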
def avg_inner_product(data1, data2, bin_size):
assert data1.duration == data2.duration<EOL>assert data1.sample_rate == data2.sample_rate<EOL>seglen = int(bin_size * data1.sample_rate)<EOL>inner_prod = []<EOL>for idx in range(int(data1.duration / bin_size)):<EOL><INDENT>start, end = idx * seglen, (idx+<NUM_LIT:1>) * seglen<EOL>norm = len(data1[start:end])<EOL>bin_prod = <NUM_LIT:2> * sum(data1.data[start:end].real *<EOL>numpy.conjugate(data2.data[start:end])) / norm<EOL>inner_prod.append(bin_prod)<EOL><DEDENT>inner_median = complex_median(inner_prod)<EOL>return inner_prod, numpy.abs(inner_median), numpy.angle(inner_median)<EOL>
Calculate the time-domain inner product averaged over bins. Parameters ---------- data1: pycbc.types.TimeSeries First data set. data2: pycbc.types.TimeSeries Second data set, with same duration and sample rate as data1. bin_size: float Duration of the bins the data will be divided into to calculate the inner product. Returns ------- inner_prod: list The (complex) inner product of data1 and data2 obtained in each bin. amp: float The absolute value of the median of the inner product. phi: float The angle of the median of the inner product.
f16021:m1
def line_model(freq, data, tref, amp=<NUM_LIT:1>, phi=<NUM_LIT:0>):
freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,<EOL>epoch=data.start_time)<EOL>times = data.sample_times - float(tref)<EOL>alpha = <NUM_LIT:2> * numpy.pi * freq * times + phi<EOL>freq_line.data = amp * numpy.exp(<NUM_LIT> * alpha)<EOL>return freq_line<EOL>
Simple time-domain model for a frequency line. Parameters ---------- freq: float Frequency of the line. data: pycbc.types.TimeSeries Reference data, to get delta_t, start_time, duration and sample_times. tref: float Reference time for the line model. amp: {1., float}, optional Amplitude of the frequency line. phi: {0., float}, optional Phase of the frequency line (radians). Returns ------- freq_line: pycbc.types.TimeSeries A timeseries of the line model with frequency 'freq'. The returned data are complex to allow measuring the amplitude and phase of the corresponding frequency line in the strain data. For extraction, use only the real part of the data.
f16021:m2
def matching_line(freq, data, tref, bin_size=<NUM_LIT:1>):
template_line = line_model(freq, data, tref=tref)<EOL>_, amp, phi = avg_inner_product(data, template_line,<EOL>bin_size=bin_size)<EOL>return line_model(freq, data, tref=tref, amp=amp, phi=phi)<EOL>
Find the parameters of the line with frequency 'freq' in the data. Parameters ---------- freq: float Frequency of the line to find in the data. data: pycbc.types.TimeSeries Data from which the line is to be measured. tref: float Reference time for the frequency line. bin_size: {1, float}, optional Duration of the bins the data will be divided into for averaging. Returns ------- line_model: pycbc.types.TimeSeries A timeseries containing the frequency line with the amplitude and phase measured from the data.
f16021:m3
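A usage sketch for line subtraction. The import path is an assumption (these records appear to come from pycbc.strain.lines), as are the sampling parameters and the injected 60 Hz line:

import numpy
from pycbc.types import TimeSeries
from pycbc.strain.lines import matching_line   # assumed module path

delta_t = 1.0 / 1024
t = numpy.arange(0, 64, delta_t)
strain = TimeSeries(numpy.random.normal(size=t.size) +
                    0.5 * numpy.cos(2 * numpy.pi * 60.0 * t),
                    delta_t=delta_t)
# Measure the 60 Hz line over 1 s bins, then subtract its real part:
fitted = matching_line(60.0, strain, tref=0.0, bin_size=1.0)
cleaned = TimeSeries(strain.numpy() - fitted.numpy().real,
                     delta_t=delta_t)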