sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def merged_srcmaps(self, **kwargs):
    """Return the name of a merged source map file.

    Keyword arguments override the entries of ``self.base_dict``;
    'dataset' and 'component' default to the values built by
    ``self.dataset`` and ``self.component``.  If 'fullpath' is True
    the full path is returned rather than the local file name.
    """
    # Defaults are computed eagerly, matching dict.get(key, default).
    default_dataset = self.dataset(**kwargs)
    default_component = self.component(**kwargs)
    fmt_args = self.base_dict.copy()
    fmt_args.update(**kwargs)
    fmt_args['dataset'] = kwargs.get('dataset', default_dataset)
    fmt_args['component'] = kwargs.get('component', default_component)
    self._replace_none(fmt_args)
    localpath = NameFactory.merged_srcmaps_format.format(**fmt_args)
    if not kwargs.get('fullpath', False):
        return localpath
    return self.fullpath(localpath=localpath)
|
return the name of a source map file
|
entailment
|
def master_srcmdl_xml(self, **kwargs):
    """Return the name of a master source model XML file.

    Keyword arguments override the entries of ``self.base_dict``.
    If 'fullpath' is True the full path is returned rather than the
    local file name.
    """
    fmt_args = dict(self.base_dict, **kwargs)
    self._replace_none(fmt_args)
    localpath = NameFactory.master_srcmdl_xml_format.format(**fmt_args)
    if not kwargs.get('fullpath', False):
        return localpath
    return self.fullpath(localpath=localpath)
|
return the name of a source model file
|
entailment
|
def comp_srcmdl_xml(self, **kwargs):
    """Return the name of a component source model XML file.

    Keyword arguments override the entries of ``self.base_dict``;
    'dataset' and 'component' default to the values built by
    ``self.dataset`` and ``self.component``.  If 'fullpath' is True
    the full path is returned rather than the local file name.
    """
    # Defaults are computed eagerly, matching dict.get(key, default).
    default_dataset = self.dataset(**kwargs)
    default_component = self.component(**kwargs)
    fmt_args = dict(self.base_dict, **kwargs)
    fmt_args['dataset'] = kwargs.get('dataset', default_dataset)
    fmt_args['component'] = kwargs.get('component', default_component)
    self._replace_none(fmt_args)
    localpath = NameFactory.comp_srcmdl_xml_format.format(**fmt_args)
    if not kwargs.get('fullpath', False):
        return localpath
    return self.fullpath(localpath=localpath)
|
return the name of a source model file
|
entailment
|
def fullpath(self, **kwargs):
    """Return a full path name for a given file.

    Keyword arguments override the entries of ``self.base_dict``
    before they are expanded into ``NameFactory.fullpath_format``.
    """
    fmt_args = dict(self.base_dict, **kwargs)
    self._replace_none(fmt_args)
    return NameFactory.fullpath_format.format(**fmt_args)
|
Return a full path name for a given file
|
entailment
|
def generic(self, input_string, **kwargs):
    """Return a generic filename for a given dataset and component.

    ``input_string`` is treated as a format template and expanded
    with the merged keyword arguments; 'dataset' and 'component'
    default to the values built by ``self.dataset`` and
    ``self.component``.
    """
    # Defaults are computed eagerly, matching dict.get(key, default).
    default_dataset = self.dataset(**kwargs)
    default_component = self.component(**kwargs)
    fmt_args = dict(self.base_dict, **kwargs)
    fmt_args['dataset'] = kwargs.get('dataset', default_dataset)
    fmt_args['component'] = kwargs.get('component', default_component)
    self._replace_none(fmt_args)
    return input_string.format(**fmt_args)
|
return a generic filename for a given dataset and component
|
entailment
|
def make_filenames(self, **kwargs):
    """Make a dictionary of filenames for various types."""
    return {
        'ft1file': self.ft1file(**kwargs),
        'ltcube': self.ltcube(**kwargs),
        'ccube': self.ccube(**kwargs),
        'bexpcube': self.bexpcube(**kwargs),
        'srcmaps': self.srcmaps(**kwargs),
        'mcube': self.mcube(**kwargs),
    }
|
Make a dictionary of filenames for various types
|
entailment
|
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided to
    the individual links."""
    common = dict(comp=args.get('comp'),
                  data=args.get('data'),
                  library=args.get('library'),
                  dry_run=args.get('dry_run', False))
    # Links are registered in execution order.
    self._set_link('sum-rings', SumRings_SG,
                   library=common['library'],
                   outdir=args['outdir'],
                   dry_run=common['dry_run'])
    self._set_link('srcmaps-diffuse', SrcmapsDiffuse_SG,
                   make_xml=args['make_xml'], **common)
    self._set_link('vstack-diffuse', Vstack_SG, **common)
|
Map from the top-level arguments to the arguments provided to
the indiviudal links
|
entailment
|
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided to
    the individual links."""
    common = dict(comp=args.get('comp'),
                  data=args.get('data'),
                  library=args.get('library'),
                  dry_run=args.get('dry_run', False))
    # Links are registered in execution order.
    self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,
                   nsrc=args.get('nsrc', 500), **common)
    self._set_link('gather-srcmaps', GatherSrcmaps_SG, **common)
    self._set_link('merge-srcmaps', MergeSrcmaps_SG, **common)
|
Map from the top-level arguments to the arguments provided to
the indiviudal links
|
entailment
|
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided to
    the individual links."""
    # All configuration comes from the yaml file named by 'config'.
    config_dict = load_yaml(args['config'])
    dry_run = args.get('dry_run', False)
    data = config_dict.get('data')
    comp = config_dict.get('comp')
    library = config_dict.get('library')
    self._set_link('prepare', SplitAndBinChain,
                   comp=comp, data=data,
                   ft1file=config_dict.get('ft1file'),
                   hpx_order_ccube=config_dict.get('hpx_order_ccube'),
                   hpx_order_expcube=config_dict.get('hpx_order_expcube'),
                   scratch=config_dict.get('scratch'),
                   dry_run=dry_run)
    self._set_link('diffuse-comp', DiffuseCompChain,
                   comp=comp, data=data,
                   library=library,
                   make_xml=config_dict.get('make_diffuse_comp_xml', False),
                   outdir=config_dict.get('merged_gasmap_dir', 'merged_gasmap'),
                   dry_run=dry_run)
    self._set_link('catalog-comp', CatalogCompChain,
                   comp=comp, data=data,
                   library=library,
                   make_xml=config_dict.get('make_catalog_comp_xml', False),
                   nsrc=config_dict.get('catalog_nsrc', 500),
                   dry_run=dry_run)
    self._set_link('assemble-model', AssembleModelChain,
                   comp=comp, data=data,
                   library=library,
                   models=config_dict.get('models'),
                   hpx_order=config_dict.get('hpx_order_fitting'),
                   dry_run=dry_run)
|
Map from the top-level arguments to the arguments provided to
the indiviudal links
|
entailment
|
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None):
    """Create a Function object from a parameter dictionary.

    Parameters
    ----------
    spectrum_type : str
        String identifying the spectrum type (e.g. PowerLaw).
    spectral_pars : dict
        Dictionary of spectral parameters.
    fn : `pyLikelihood.Function`, optional
        Existing function object to fill; when None a new one is
        created from the pyLikelihood source factory.
    """
    if fn is None:
        fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))
    if spectrum_type == 'PiecewisePowerLaw':
        build_piecewise_powerlaw(fn, spectral_pars)
    for name, pdict in spectral_pars.items():
        # Fill in defaults for scale and bounds when not provided.
        pdict.setdefault('scale', 1.0)
        pdict.setdefault('min', pdict['value'] * 1E-3)
        pdict.setdefault('max', pdict['value'] * 1E3)
        value = float(pdict['value'])
        # Widen the bounds if needed so that they always bracket the value.
        lo = min(value, float(pdict['min']))
        hi = max(value, float(pdict['max']))
        par = fn.getParam(str(name))
        par.setValue(value)
        par.setBounds(lo, hi)
        par.setScale(float(pdict['scale']))
        par.setFree('free' in pdict and int(pdict['free']) != 0)
        fn.setParam(par)
    return fn
|
Create a Function object from a parameter dictionary.
Parameters
----------
spectrum_type : str
String identifying the spectrum type (e.g. PowerLaw).
spectral_pars : dict
Dictionary of spectral parameters.
|
entailment
|
def gtlike_spectrum_to_dict(spectrum):
    """Convert a pyLikelihood spectrum object to a python dictionary
    which can be easily saved to a file."""
    pars = pyLike.ParameterVector()
    spectrum.getParams(pars)
    out = dict(spectrum_type=spectrum.genericName())
    for par in pars:
        if par.isFree():
            err = abs(par.error() * par.getScale())
        else:
            err = np.nan
        out[par.getName()] = np.array([par.getTrueValue(), err])
    # FileFunction spectra also carry the name of their input file.
    if out['spectrum_type'] == 'FileFunction':
        out['file'] = pyLike.FileFunction_cast(spectrum).filename()
    return out
|
Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file.
|
entailment
|
def gtlike_spectrum_to_vectors(spectrum):
    """Convert a pyLikelihood spectrum object to a dictionary of
    fixed-length numpy arrays (names, values, errors) which can be
    easily saved to a file."""
    pars = pyLike.ParameterVector()
    spectrum.getParams(pars)
    # Reserve at least 10 slots so tables built from several spectra
    # share a common column length.
    npar = max(pars.size(), 10)
    out = {'param_names': np.zeros(npar, dtype='S32'),
           'param_values': np.full(npar, np.nan),
           'param_errors': np.full(npar, np.nan)}
    for idx, par in enumerate(pars):
        out['param_names'][idx] = par.getName()
        out['param_values'][idx] = par.getTrueValue()
        # Errors stay NaN for fixed parameters.
        if par.isFree():
            out['param_errors'][idx] = abs(par.error() * par.getScale())
    return out
|
Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file.
|
entailment
|
def get_function_pars(fn):
    """Extract the parameters of a pyLikelihood function object
    (value, scale, bounds).

    Parameters
    ----------
    fn : pyLikelihood.Function

    Returns
    -------
    pars : list
        One dict per parameter with keys name, value, error, min,
        max, free and scale.  The error is NaN for fixed parameters.
    """
    names = pyLike.StringVector()
    fn.getParamNames(names)
    out = []
    for pname in names:
        par = fn.getParam(pname)
        lo, hi = par.getBounds()
        out.append(dict(name=pname,
                        value=par.getValue(),
                        error=par.error() if par.isFree() else np.nan,
                        min=lo,
                        max=hi,
                        free=par.isFree(),
                        scale=par.getScale()))
    return out
|
Extract the parameters of a pyLikelihood function object
(value, scale, bounds).
Parameters
----------
fn : pyLikelihood.Function
Returns
-------
pars : list
|
entailment
|
def get_priors(like):
    """Extract priors from a likelihood object.

    Parameters
    ----------
    like : likelihood object
        Object whose ``params()`` entries may carry a log-prior.

    Returns
    -------
    vals : `~numpy.ndarray`
        Prior means (1.0 where no prior is set).
    errs : `~numpy.ndarray`
        Prior sigmas (1.0 where no prior is set).
    has_prior : `~numpy.ndarray`
        Boolean mask of the parameters that have a prior.
    """
    npar = len(like.params())
    vals = np.ones(npar)
    errs = np.ones(npar)
    # idiomatic boolean array (was np.array([False] * npar))
    has_prior = np.zeros(npar, dtype=bool)
    for i in range(npar):
        prior = like[i].log_prior()
        if prior is None:
            continue
        par_names = pyLike.StringVector()
        prior.getParamNames(par_names)
        # Only Gaussian-style (Mean/Sigma) priors are supported.
        if 'Mean' not in par_names:
            raise Exception('Failed to find Mean in prior parameters.')
        if 'Sigma' not in par_names:
            raise Exception('Failed to find Sigma in prior parameters.')
        # Both names are known to be present, so look them up directly
        # instead of scanning the name vector again.
        vals[i] = prior.parameter('Mean').getValue()
        errs[i] = prior.parameter('Sigma').getValue()
        has_prior[i] = True
    return vals, errs, has_prior
|
Extract priors from a likelihood object.
|
entailment
|
def get_source_pars(src):
    """Extract the parameters associated with a pyLikelihood Source object.

    Returns
    -------
    spars, ppars : list
        Spectral and spatial parameter dictionaries; each entry also
        carries an 'is_norm' flag marking the spectral normalization
        parameter.
    """
    fnmap = src.getSrcFuncs()
    if 'Position' in fnmap.keys():
        spatial_key = 'Position'
    elif 'SpatialDist' in fnmap.keys():
        spatial_key = 'SpatialDist'
    else:
        raise Exception('Failed to extract spatial parameters.')
    ppars = get_function_pars(fnmap[str(spatial_key)])
    spectrum = fnmap[str('Spectrum')]
    spars = get_function_pars(spectrum)
    norm_name = spectrum.normPar().getName()
    for pdict in ppars:
        pdict['is_norm'] = False
    for pdict in spars:
        pdict['is_norm'] = (pdict['name'] == norm_name)
    return spars, ppars
|
Extract the parameters associated with a pyLikelihood Source object.
|
entailment
|
def nFreeParams(self):
    """Count the number of free parameters in the active model."""
    return sum(1 for par in self.params() if par.isFree())
|
Count the number of free parameters in the active model.
|
entailment
|
def Ts2(self, srcName, reoptimize=False, approx=True,
        tol=None, MaxIterations=10, verbosity=0):
    """Computes the TS value for a source indicated by "srcName."

    If "reoptimize=True" is selected this function will reoptimize
    the model up to "MaxIterations" given the tolerance "tol"
    (default is the tolerance selected for the overall fit). If
    "approx=True" is selected (the default) it will renormalize the
    model (see _renorm).
    """
    # Snapshot the full fit state so it can be restored before returning.
    saved_state = LikelihoodState(self)
    if verbosity > 0:
        print("*** Start Ts_dl ***")
    source_attributes = self.getExtraSourceAttributes()
    self.logLike.syncParams()
    src = self.logLike.getSource(srcName)
    self._ts_src = src
    freeParams = pyLike.DoubleVector()
    self.logLike.getFreeParamValues(freeParams)
    # Log-likelihood with the source present ...
    logLike1 = self.logLike.value()
    # ... and with the source scaled down to a negligible level.
    self.scaleSource(srcName, 1E-10)
    logLike0 = self.logLike.value()
    if tol is None:
        tol = self.tol
    if reoptimize:
        if verbosity > 0:
            print("** Do reoptimize")
        optFactory = pyLike.OptimizerFactory_instance()
        myOpt = optFactory.create(self.optimizer, self.logLike)
        Niter = 1
        # Retry the minimization until it converges or the iteration
        # budget is exhausted.
        while Niter <= MaxIterations:
            try:
                myOpt.find_min(0, tol)
                break
            except RuntimeError as e:
                print(e)
            if verbosity > 0:
                print("** Iteration :", Niter)
            Niter += 1
    else:
        if approx:
            # Cheap alternative to a full refit; see _renorm.
            try:
                self._renorm()
            except ZeroDivisionError:
                pass
    self.logLike.syncParams()
    # Use the better (larger) null log-likelihood found after the
    # renormalization / reoptimization step.
    logLike0 = max(self.logLike.value(), logLike0)
    Ts_value = 2 * (logLike1 - logLike0)
    # Undo the source scaling and restore the free parameter values.
    self.scaleSource(srcName, 1E10)
    self.logLike.setFreeParamValues(freeParams)
    self.model = SourceModel(self.logLike)
    for src in source_attributes:
        self.model[src].__dict__.update(source_attributes[src])
    saved_state.restore()
    self.logLike.value()
    return Ts_value
|
Computes the TS value for a source indicated by "srcName."
If "reoptimize=True" is selected this function will reoptimize
the model up to "MaxIterations" given the tolerance "tol"
(default is the tolerance selected for the overall fit). If
"appox=True" is selected (the default) it will renormalize the
model (see _renorm).
|
entailment
|
def _make_scatter_logfile_name(cls, key, linkname, job_config):
"""Hook to inster the name of a logfile into the input config """
logfile = job_config.get('logfile', "%s_%s_%s.log" %
(cls.default_prefix_logfile, linkname, key))
job_config['logfile'] = logfile
|
Hook to inster the name of a logfile into the input config
|
entailment
|
def create(cls, **kwargs):
    """Build and return a `ScatterGather` object."""
    linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)
    # Don't use setdefault directly: a temporary JobArchive should only
    # be built when the caller did not supply one.
    job_archive = kwargs.get('job_archive', None)
    if job_archive is None:
        job_archive = JobArchive.build_temp_job_archive()
    kwargs.setdefault('job_archive', job_archive)
    client_kwargs = dict(linkname=linkname,
                         link_prefix=kwargs.get('link_prefix', ''),
                         file_stage=kwargs.get('file_stage', None),
                         job_archive=job_archive)
    link = cls.clientclass.create(**client_kwargs)
    return cls(link, **kwargs)
|
Build and return a `ScatterGather` object
|
entailment
|
def _latch_file_info(self):
    """Internal function to update the dictionaries
    keeping track of input and output files
    """
    # Drop stale entries before re-latching from the current arguments.
    self.files.file_dict.clear()
    self.sub_files.file_dict.clear()
    # Rebuild the top-level file dict from this link's arguments ...
    self.files.latch_file_info(self.args)
    # ... and collect the per-job files from the scatter link.
    self._scatter_link._update_sub_file_dict(self.sub_files)
|
Internal function to update the dictionaries
keeping track of input and output files
|
entailment
|
def _check_link_completion(self, link, fail_pending=False, fail_running=False):
    """Internal function to check the completion of all the dispatched jobs

    Parameters
    ----------
    link : `Link`
        Link whose jobs are polled.
    fail_pending : bool
        If True, consider pending jobs as failed.
    fail_running : bool
        If True, consider running jobs as failed.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarize the number of jobs in various states.
    """
    status_vect = JobStatusVector()
    for job_key, job_details in link.jobs.items():
        # The top-level placeholder entry is not a real job.
        if job_key.find(JobDetails.topkey) >= 0:
            continue
        job_details.status = self._interface.check_job(job_details)
        # Optionally treat pending / running jobs as failures
        # (used when deciding what to resubmit).
        if ((job_details.status == JobStatus.pending and fail_pending) or
                (job_details.status == JobStatus.running and fail_running)):
            job_details.status = JobStatus.failed
        status_vect[job_details.status] += 1
        link.jobs[job_key] = job_details
        link._set_status_self(job_details.jobkey, job_details.status)
    return status_vect
|
Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
Vector that summarize the number of jobs in various states.
|
entailment
|
def _build_job_dict(self):
    """Build a dictionary of `JobDetails` objects for the internal `Link`."""
    status = JobStatus.unknown if self.args['dry_run'] else JobStatus.not_ready
    base_config = self.scatter_link.args
    for job_key, overrides in sorted(self._job_configs.items()):
        merged_config = base_config.copy()
        merged_config.update(overrides)
        ScatterGather._make_scatter_logfile_name(job_key,
                                                 self.linkname, merged_config)
        # NOTE(review): the logfile is read back from the per-job
        # override dict, not from the merged dict that the logfile name
        # was just inserted into — so it can be None; confirm intended.
        logfile = overrides.get('logfile')
        self._scatter_link._register_job(key=job_key,
                                         job_config=merged_config,
                                         logfile=logfile,
                                         status=status)
|
Build a dictionary of `JobDetails` objects for the internal `Link`
|
entailment
|
def _run_link(self, stream=sys.stdout, dry_run=False,
              stage_files=True, resubmit_failed=False):
    """Internal function that actually runs this link.

    This checks if input and output files are present.
    If input files are missing this will raise `OSError` if dry_run is False
    If all output files are present this will skip execution.

    Parameters
    -----------
    stream : `file`
        Stream that this `Link` will print to,
        must have 'write' function.
    dry_run : bool
        Print command but do not run it.
    stage_files : bool
        Stage files to and from the scratch area.
        NOTE(review): not referenced in this body; presumably consumed
        downstream — confirm.
    resubmit_failed : bool
        Resubmit failed jobs.
    """
    if resubmit_failed:
        self.args['action'] = 'resubmit'
    argv = self._make_argv()
    if dry_run:
        argv += ['--dry_run']
    self._invoke(argv, stream, resubmit_failed=resubmit_failed)
|
Internal function that actually runs this link.
This checks if input and output files are present.
If input files are missing this will raise `OSError` if dry_run is False
If all output files are present this will skip execution.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
must have 'write' function.
dry_run : bool
Print command but do not run it.
stage_files : bool
Stage files to and from the scratch area.
resubmit_failed : bool
Resubmit failed jobs.
|
entailment
|
def _invoke(self, argv, stream=sys.stdout, resubmit_failed=False):
    """Invoke this object to perform a particular action

    Parameters
    ----------
    argv : list
        List of command line arguments, passed to helper classes
    stream : `file`
        Stream that this function will print to,
        must have 'write' function.
    resubmit_failed : bool
        Resubmit failed jobs.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarize the number of jobs in various states.

    Raises
    ------
    ValueError
        If the requested action is not recognized.
    """
    args = self._run_argparser(argv)
    if args.action not in ACTIONS:
        # Previously this only wrote a warning and then fell through to
        # an UnboundLocalError on 'status_vect'; fail explicitly instead.
        sys.stderr.write(
            "Unrecognized action %s, options are %s\n" % (args.action, ACTIONS))
        raise ValueError("Unrecognized action %s" % args.action)
    if args.action == 'skip':
        return JobStatus.no_job
    elif args.action in ['run', 'resubmit', 'check_status', 'config']:
        self._job_configs = self.build_job_configs(args.__dict__)
    self._interface._dry_run = args.dry_run
    if args.action == 'run':
        status_vect = self.run_jobs(stream, resubmit_failed=resubmit_failed)
    elif args.action == 'resubmit':
        status_vect = self.resubmit(stream, resubmit_failed=resubmit_failed)
    elif args.action == 'check_status':
        self._build_job_dict()
        status_vect = self.check_status(stream)
    elif args.action == 'config':
        self._build_job_dict()
        status_vect = JobStatusVector()
        status_vect[JobStatus.done] += 1
    else:
        # A valid but unhandled action would also leave status_vect
        # unbound; make that failure mode explicit as well.
        raise ValueError("Unhandled action %s" % args.action)
    return status_vect
|
Invoke this object to preform a particular action
Parameters
----------
argv : list
List of command line arguments, passed to helper classes
stream : `file`
Stream that this function will print to,
must have 'write' function.
resubmit_failed : bool
Resubmit failed jobs.
Returns
-------
status_vect : `JobStatusVector`
Vector that summarize the number of jobs in various states.
|
entailment
|
def update_args(self, override_args):
    """Update the arguments used to invoke the application

    Note that this also refreshes the job configurations and the
    dictionaries of input and output files.

    Parameters
    ----------
    override_args : dict
        dictionary of arguments to override the current values
    """
    self.args = extract_arguments(override_args, self.args)
    self._job_configs = self.build_job_configs(self.args)
    # Build the job dict only if it has not been built already.
    if not self._scatter_link.jobs:
        self._build_job_dict()
    self._latch_file_info()
|
Update the arguments used to invoke the application
Note that this will also update the dictionary of input and output files
Parameters
----------
override_args : dict
dictionary of arguments to override the current values
|
entailment
|
def clear_jobs(self, recursive=True):
    """Clear the self.jobs dictionary that contains information
    about jobs associated with this `ScatterGather`.

    If recursive is True this will also clear the jobs of all
    internal `Link` objects.
    """
    if recursive:
        self._scatter_link.clear_jobs(recursive)
    self.jobs.clear()
|
Clear the self.jobs dictionary that contains information
about jobs associated with this `ScatterGather`
If recursive is True this will include jobs from all internal `Link`
|
entailment
|
def get_jobs(self, recursive=True):
    """Return a dictionary with all the jobs.

    If recursive is True this will also include jobs from all
    internal `Link` objects.
    """
    if not recursive:
        return self.jobs
    all_jobs = self.jobs.copy()
    all_jobs.update(self._scatter_link.get_jobs(recursive))
    return all_jobs
|
Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
|
entailment
|
def check_status(self, stream=sys.stdout,
                 check_once=False,
                 fail_pending=False, fail_running=False,
                 no_wait=False, do_print=True,
                 write_status=False):
    """Loop to check on the status of all the jobs in job dict.

    Parameters
    -----------
    stream : `file`
        Stream that this function will print to,
        Must have 'write' function.
    check_once : bool
        Check status once and exit loop.
    fail_pending : `bool`
        If True, consider pending jobs as failed
    fail_running : `bool`
        If True, consider running jobs as failed
    no_wait : bool
        Do not sleep before checking jobs.
    do_print : bool
        Print summary stats.
    write_status : bool
        Write the status to the log file.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarize the number of jobs in various states.
    """
    running = True
    first = True
    if not check_once:
        # When logging to a file, echo a compact progress line to stdout.
        if stream != sys.stdout:
            sys.stdout.write('Checking status (%is): ' %
                             self.args['job_check_sleep'])
            sys.stdout.flush()
    status_vect = JobStatusVector()
    while running:
        # Sleep between polls — but never before the first poll, not
        # for dry runs (which break out), and not when no_wait is set.
        if first:
            first = False
        elif self.args['dry_run']:
            break
        elif no_wait:
            pass
        else:
            stream.write("Sleeping %.0f seconds between status checks\n" %
                         self.args['job_check_sleep'])
            if stream != sys.stdout:
                sys.stdout.write('.')
                sys.stdout.flush()
            time.sleep(self.args['job_check_sleep'])
        status_vect = self._check_link_completion(self._scatter_link,
                                                  fail_pending, fail_running)
        # Single-pass modes: report once and stop polling.
        if self.args['check_status_once'] or check_once or no_wait:
            if do_print:
                self.print_update(stream, status_vect)
            break
        if self.args['print_update']:
            if do_print:
                self.print_update(stream, status_vect)
        if self._job_archive is not None:
            self._job_archive.write_table_file()
        # Stop looping once every job is either done or failed.
        n_total = status_vect.n_total
        n_done = status_vect.n_done
        n_failed = status_vect.n_failed
        if n_done + n_failed == n_total:
            running = False
    status = status_vect.get_status()
    if status in [JobStatus.failed, JobStatus.partial_failed]:
        # On (partial) failure, show the summary and the failed jobs.
        if do_print:
            self.print_update(stream, status_vect)
            self.print_failed(stream)
        if write_status:
            self._write_status_to_log(status, stream)
    else:
        if write_status:
            self._write_status_to_log(0, stream)
    self._set_status_self(status=status)
    if not check_once:
        # Terminate the progress line started above.
        if stream != sys.stdout:
            sys.stdout.write("! %s\n" % (JOB_STATUS_STRINGS[status]))
    if self._job_archive is not None:
        self._job_archive.write_table_file()
    return status_vect
|
Loop to check on the status of all the jobs in job dict.
Parameters
-----------
stream : `file`
Stream that this function will print to,
Must have 'write' function.
check_once : bool
Check status once and exit loop.
fail_pending : `bool`
If True, consider pending jobs as failed
fail_running : `bool`
If True, consider running jobs as failed
no_wait : bool
Do not sleep before checking jobs.
do_print : bool
Print summary stats.
write_status : bool
Write the status the to log file.
Returns
-------
status_vect : `JobStatusVector`
Vector that summarize the number of jobs in various states.
|
entailment
|
def run_jobs(self, stream=sys.stdout, resubmit_failed=False):
    """Function to dispatch jobs and collect results

    Parameters
    -----------
    stream : `file`
        Stream that this function will print to,
        Must have 'write' function.
    resubmit_failed : bool
        Resubmit failed jobs.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarize the number of jobs in various states.
    """
    self._build_job_dict()
    self._interface._dry_run = self.args['dry_run']
    scatter_status = self._interface.submit_jobs(self.scatter_link,
                                                 job_archive=self._job_archive,
                                                 stream=stream)
    # NOTE(review): this early exit returns a bare JobStatus, not a
    # JobStatusVector as documented — confirm callers handle both.
    if scatter_status == JobStatus.failed:
        return JobStatus.failed
    status_vect = self.check_status(stream, write_status=True)
    status = status_vect.get_status()
    if status == JobStatus.partial_failed:
        if resubmit_failed:
            # Bug fix: this called sys.write(), which does not exist
            # and raised AttributeError; use sys.stdout.write().
            sys.stdout.write("Resubmitting partially failed link %s\n" %
                             self.full_linkname)
            status_vect = self.resubmit(stream=stream, fail_running=False,
                                        resubmit_failed=True)
        else:
            sys.stdout.write("NOT resubmitting partially failed link %s\n" %
                             self.full_linkname)
    return status_vect
|
Function to dipatch jobs and collect results
Parameters
-----------
stream : `file`
Stream that this function will print to,
Must have 'write' function.
resubmit_failed : bool
Resubmit failed jobs.
Returns
-------
status_vect : `JobStatusVector`
Vector that summarize the number of jobs in various states.
|
entailment
|
def resubmit(self, stream=sys.stdout, fail_running=False, resubmit_failed=False):
    """Function to resubmit failed jobs and collect results

    Parameters
    -----------
    stream : `file`
        Stream that this function will print to,
        Must have 'write' function.
    fail_running : `bool`
        If True, consider running jobs as failed
    resubmit_failed : bool
        Resubmit failed jobs.

    Returns
    -------
    status_vect : `JobStatusVector`
        Vector that summarize the number of jobs in various states.
        NOTE(review): some early exits return a bare `JobStatus`
        instead of a vector — confirm callers handle both.
    """
    self._build_job_dict()
    # Single poll; pending jobs are treated as failed so that they are
    # picked up for resubmission below.
    status_vect = self.check_status(stream, check_once=True, fail_pending=True,
                                    fail_running=fail_running)
    status = status_vect.get_status()
    if status == JobStatus.done:
        return status
    failed_jobs = self._scatter_link.get_failed_jobs(True, True)
    if failed_jobs:
        scatter_status = self._interface.submit_jobs(self._scatter_link, failed_jobs,
                                                     job_archive=self._job_archive,
                                                     stream=stream)
        if scatter_status == JobStatus.failed:
            return JobStatus.failed
    status_vect = self.check_status(stream, write_status=True)
    status = status_vect.get_status()
    if status == JobStatus.partial_failed:
        if resubmit_failed:
            sys.stdout.write("Resubmitting partially failed link %s\n" %
                             self.full_linkname)
            # resubmit_failed=False on the recursive call bounds the
            # recursion to a single retry.
            status_vect = self.resubmit(stream=stream, fail_running=False, resubmit_failed=False)
        else:
            sys.stdout.write("NOT resubmitting partially failed link %s\n" %
                             self.full_linkname)
    if self.args['dry_run']:
        return JobStatus.unknown
    return status_vect
|
Function to resubmit failed jobs and collect results
Parameters
-----------
stream : `file`
Stream that this function will print to,
Must have 'write' function.
fail_running : `bool`
If True, consider running jobs as failed
resubmit_failed : bool
Resubmit failed jobs.
Returns
-------
status_vect : `JobStatusVector`
Vector that summarize the number of jobs in various states.
|
entailment
|
def clean_jobs(self, recursive=False):
    """Clean up all the jobs associated with this object.

    If recursive is True this also cleans the jobs dispatched by
    this object.
    """
    self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
|
Clean up all the jobs associated with this object.
If recursive is True this also clean jobs dispatch by this
object.
|
entailment
|
def print_summary(self, stream=sys.stdout, indent="", recurse_level=2):
    """Print a summary of the activity done by this `Link`.

    Parameters
    ----------
    stream : `file`
        Stream to print to
    indent : str
        Indentation at start of line
    recurse_level : int
        Number of recursion levels to print
    """
    Link.print_summary(self, stream, indent, recurse_level)
    if recurse_level <= 0:
        return
    stream.write("\n")
    # Recurse into the scatter link with one less level and a deeper
    # indent.
    self._scatter_link.print_summary(stream, indent + "  ",
                                     recurse_level - 1)
|
Print a summary of the activity done by this `Link`.
Parameters
----------
stream : `file`
Stream to print to
indent : str
Indentation at start of line
recurse_level : int
Number of recursion levels to print
|
entailment
|
def print_update(self, stream=sys.stdout, job_stats=None):
    """Print an update about the current number of jobs running

    Parameters
    ----------
    stream : `file`
        Stream to print to.
    job_stats : `JobStatusVector` or None
        Pre-computed counts to report; when None the counts are
        tallied from the scatter link's jobs.
    """
    if job_stats is None:
        # Tally only when no counts were supplied; counting into a
        # caller-provided vector would double-count its jobs.
        job_stats = JobStatusVector()
        for job_dets in self._scatter_link.jobs.values():
            # Placeholder entries carry no real job.
            if job_dets.status == JobStatus.no_job:
                continue
            job_stats[job_dets.status] += 1
    stream.write("Status :\n  Total  : %i\n  Unknown: %i\n" %
                 (job_stats.n_total, job_stats[JobStatus.unknown]))
    stream.write("  Not Ready: %i\n  Ready: %i\n" %
                 (job_stats[JobStatus.not_ready], job_stats[JobStatus.ready]))
    stream.write("  Pending: %i\n  Running: %i\n" %
                 (job_stats[JobStatus.pending], job_stats[JobStatus.running]))
    stream.write("  Done: %i\n  Failed: %i\n" %
                 (job_stats[JobStatus.done], job_stats[JobStatus.failed]))
|
Print an update about the current number of jobs running
|
entailment
|
def print_failed(self, stream=sys.stderr):
    """Print a list of the failed jobs with their log files."""
    for job_key, job_details in sorted(self.scatter_link.jobs.items()):
        if job_details.status != JobStatus.failed:
            continue
        stream.write("Failed job %s\n log = %s\n" %
                     (job_key, job_details.logfile))
|
Print list of the failed jobs
|
entailment
|
def read_sources_from_numpy_file(npfile):
    """Open a numpy pickle file and read all the new sources into a dictionary

    Parameters
    ----------
    npfile : file name
        The input numpy pickle file

    Returns
    -------
    tab : `~astropy.table.Table`
    """
    # allow_pickle=True is required to load object arrays since
    # numpy 1.16.3, where the default changed to False.
    srcs = np.load(npfile, allow_pickle=True).flat[0]['sources']
    roi = ROIModel()
    roi.load_sources(srcs.values())
    return roi.create_table()
|
Open a numpy pickle file and read all the new sources into a dictionary
Parameters
----------
npfile : file name
The input numpy pickle file
Returns
-------
tab : `~astropy.table.Table`
|
entailment
|
def read_sources_from_yaml_file(yamlfile):
    """Open a yaml file and read all the new sources into a dictionary

    Parameters
    ----------
    yamlfile : file name
        The input yaml file

    Returns
    -------
    tab : `~astropy.table.Table`
    """
    # safe_load avoids arbitrary object construction from the yaml
    # stream (yaml.load without an explicit Loader is deprecated and
    # unsafe); the context manager guarantees the handle is closed.
    with open(yamlfile) as fin:
        dd = yaml.safe_load(fin)
    srcs = dd['sources']
    roi = ROIModel()
    roi.load_sources(srcs.values())
    return roi.create_table()
|
Open a yaml file and read all the new sources into a dictionary
Parameters
----------
yaml : file name
The input yaml file
Returns
-------
tab : `~astropy.table.Table`
|
entailment
|
def merge_source_tables(src_tab, tab, all_sources=False, prefix="", suffix="",
                        roi_idx=None):
    """Append the sources in a table into another table.

    Parameters
    ----------
    src_tab : `~astropy.table.Table`
        Master source table that will be appended with the sources in
        ``tab``.
    tab : `~astropy.table.Table`
        Table to be merged into ``src_tab``.
    all_sources : bool
        If true, then all the sources get added to the table.
        if false, then only the sources that start with 'PS' get added
    prefix : str
        Prepended to all source names
    suffix : str
        Appended to all source names
    roi_idx : int or None
        When given and no 'roi' column exists, a 'roi' column with
        this value is added to ``tab``.

    Returns
    -------
    tab : `~astropy.table.Table`
    """
    if roi_idx is not None and 'roi' not in tab.columns:
        tab.add_column(Column(name='roi', data=len(tab) * [roi_idx]))
    drop_rows = []
    for idx, row in enumerate(tab):
        # Keep only 'PS*' sources unless all_sources is requested.
        if not all_sources and not row['name'].startswith("PS"):
            drop_rows.append(idx)
            continue
        row['name'] = "%s%s%s" % (prefix, row['name'], suffix)
    tab.remove_rows(drop_rows)
    if src_tab is None:
        return tab
    return vstack([src_tab, tab], join_type='outer')
|
Append the sources in a table into another table.
Parameters
----------
src_tab : `~astropy.table.Table`
Master source table that will be appended with the sources in
``tab``.
tab : `~astropy.table.Table`
Table to be merged into ``src_tab``.
all_sources : bool
If true, then all the sources get added to the table.
if false, then only the sources that start with 'PS' get added
prefix : str
Prepended to all source names
suffix : str
Appended to all source names
Returns
-------
tab : `~astropy.table.Table`
|
entailment
|
def lightcurve(self, name, **kwargs):
    """Generate a lightcurve for the named source. The function will
    complete the basic analysis steps for each bin and perform a
    likelihood fit for each bin. Extracted values (along with
    errors) are Integral Flux, spectral model, Spectral index, TS
    value, pred. # of photons. Note: successful calculation of
    TS:subscript:`var` requires at least one free background
    parameter and a previously optimized ROI model.
    Parameters
    ---------
    name: str
        source name
    {options}
    Returns
    ---------
    LightCurve : dict
        Dictionary containing output of the LC analysis
    """
    # Resolve any alias to the canonical source name known to the ROI
    name = self.roi.get_source_by_name(name).name
    # Create schema for method configuration
    schema = ConfigSchema(self.defaults['lightcurve'],
                          optimizer=self.defaults['optimizer'])
    schema.add_option('prefix', '')
    # Merge per-call kwargs on top of the instance-level configuration
    config = utils.create_dict(self.config['lightcurve'],
                               optimizer=self.config['optimizer'])
    config = schema.create_config(config, **kwargs)
    self.logger.info('Computing Lightcurve for %s' % name)
    # Run the per-bin analysis; returns the output dictionary
    o = self._make_lc(name, **config)
    filename = utils.format_filename(self.workdir, 'lightcurve',
                                     prefix=[config['prefix'],
                                             name.lower().replace(' ', '_')])
    # Record the FITS filename (basename only) when FITS output is written
    o['file'] = None
    if config['write_fits']:
        o['file'] = os.path.basename(filename) + '.fits'
        self._make_lc_fits(o, filename + '.fits', **config)
    # Optionally persist the raw dictionary as a numpy archive
    if config['write_npy']:
        np.save(filename + '.npy', o)
    self.logger.info('Finished Lightcurve')
    return o
|
Generate a lightcurve for the named source. The function will
complete the basic analysis steps for each bin and perform a
likelihood fit for each bin. Extracted values (along with
errors) are Integral Flux, spectral model, Spectral index, TS
value, pred. # of photons. Note: successful calculation of
TS:subscript:`var` requires at least one free background
parameter and a previously optimized ROI model.
Parameters
---------
name: str
source name
{options}
Returns
---------
LightCurve : dict
Dictionary containing output of the LC analysis
|
entailment
|
def main():
    """Main function for command line usage.

    Builds an intensity cube from an input counts cube and binned
    exposure cube and writes the result to a FITS file.
    """
    usage = "usage: %(prog)s [options] "
    description = "Merge a set of Fermi-LAT files."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-o', '--output', default=None, type=str,
                        help='Output file.')
    parser.add_argument('--ccube', default=None, type=str,
                        help='Input counts cube file .')
    parser.add_argument('--bexpcube', default=None, type=str,
                        help='Input binned exposure cube.')
    parser.add_argument('--hpx_order', default=None, type=int,
                        help='Order of output map: default = counts map order')
    parser.add_argument('--clobber', action='store_true',
                        help='Overwrite output file')
    args = parser.parse_args()
    ccube = HpxMap.create_from_fits(args.ccube, hdu='SKYMAP')
    bexpcube = HpxMap.create_from_fits(args.bexpcube, hdu='HPXEXPOSURES')
    # BUG FIX: test against None explicitly -- a HEALPix order of 0 is a
    # valid user choice and must not silently fall back to the map order.
    if args.hpx_order is not None:
        hpx_order = args.hpx_order
    else:
        hpx_order = ccube.hpx.order
    out_cube = intensity_cube(ccube, bexpcube, hpx_order)
    out_cube.hpx.write_fits(out_cube.data, args.output, clobber=args.clobber)
|
Main function for command line usage
|
entailment
|
def fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges):
    """Generate a sequence of livetime distributions at the sky
    positions given by ``skydir``. The output of the method are two
    NxM arrays containing a sequence of histograms for N sky positions
    and M incidence angle bins where the bin edges are defined by
    ``costh_edges``. This method uses the same algorithm as
    `gtltcube` with the exception that SC time intervals are assumed
    to be aligned with GTIs.
    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Vector of sky directions for which livetime histograms will be
        accumulated.
    tab_sc : `~astropy.table.Table`
        Spacecraft table. Must contain the following columns: START,
        STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH.
    tab_gti : `~astropy.table.Table`
        Table of good time intervals (GTIs).
    zmax : float
        Zenith cut.
    costh_edges : `~numpy.ndarray`
        Incidence angle bin edges in cos(angle).
    Returns
    -------
    lt : `~numpy.ndarray`
        Array of livetime histograms.
    lt_wt : `~numpy.ndarray`
        Array of histograms of weighted livetime (livetime x livetime
        fraction).
    """
    # No GTIs: return all-zero histograms with the expected shape
    if len(tab_gti) == 0:
        shape = (len(costh_edges) - 1, len(skydir))
        return (np.zeros(shape), np.zeros(shape))
    # Keep only SC intervals that overlap the overall GTI time span
    m = (tab_sc['START'] < tab_gti['STOP'][-1])
    m &= (tab_sc['STOP'] > tab_gti['START'][0])
    tab_sc = tab_sc[m]
    cos_zmax = np.cos(np.radians(zmax))
    sc_t0 = np.array(tab_sc['START'].data)
    sc_t1 = np.array(tab_sc['STOP'].data)
    sc_live = np.array(tab_sc['LIVETIME'].data)
    # Livetime fraction of each SC interval
    sc_lfrac = sc_live / (sc_t1 - sc_t0)
    # Unit vectors of the SC z-axis and of the zenith direction
    sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data),
                                np.radians(tab_sc['DEC_SCZ'].data))
    zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data),
                                np.radians(tab_sc['DEC_ZENITH'].data))
    tab_gti_t0 = np.array(tab_gti['START'].data)
    tab_gti_t1 = np.array(tab_gti['STOP'].data)
    # Index of the closest GTI interval
    idx = np.digitize(sc_t0, tab_gti_t0) - 1
    # start/stop time of closest GTI interval
    gti_t0 = np.zeros_like(sc_t0)
    gti_t1 = np.zeros_like(sc_t1)
    gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]]
    gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]]
    nbin = len(costh_edges) - 1
    lt = np.zeros((nbin,) + skydir.shape)
    lt_wt = np.zeros((nbin,) + skydir.shape)
    # Select SC intervals fully contained in their matched GTI
    # (SC intervals are assumed aligned with GTIs -- see docstring)
    m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1)
    xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad)
    for i, t in enumerate(xyz):
        # Cosine of the angle to the SC z-axis and to the zenith
        cos_sep = utils.dot_prod(t, sc_xyz)
        cos_zn = utils.dot_prod(t, zn_xyz)
        # Apply zenith cut; keep directions in the forward hemisphere
        m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0)
        bins = np.digitize(cos_sep[m], bins=costh_edges) - 1
        bins = np.clip(bins, 0, nbin - 1)
        lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin)
        lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m],
                                  minlength=nbin)
    return lt, lt_wt
|
Generate a sequence of livetime distributions at the sky
positions given by ``skydir``. The output of the method are two
NxM arrays containing a sequence of histograms for N sky positions
and M incidence angle bins where the bin edges are defined by
``costh_edges``. This method uses the same algorithm as
`gtltcube` with the exception that SC time intervals are assumed
to be aligned with GTIs.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Vector of sky directions for which livetime histograms will be
accumulated.
tab_sc : `~astropy.table.Table`
Spacecraft table. Must contain the following columns: START,
STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH.
tab_gti : `~astropy.table.Table`
Table of good time intervals (GTIs).
zmax : float
Zenith cut.
costh_edges : `~numpy.ndarray`
Incidence angle bin edges in cos(angle).
Returns
-------
lt : `~numpy.ndarray`
Array of livetime histograms.
lt_wt : `~numpy.ndarray`
Array of histograms of weighted livetime (livetime x livetime
fraction).
|
entailment
|
def create(cls, ltfile):
    """Create a livetime cube from a single file or list of
    files.

    Parameters
    ----------
    ltfile : str or list
        Either a list of livetime-cube files, the path to an ASCII
        (.txt) file containing a list of files, or a glob pattern
        matching one or more livetime-cube files.
    """
    # BUG FIX: test for a list first.  The original code never bound
    # 'files' for list input (NameError), and re.search on a list
    # raises TypeError before that branch could even be reached.
    if isinstance(ltfile, list):
        files = ltfile
    elif re.search(r'\.txt?', ltfile) is not None:
        # ASCII file listing one input file per line
        files = np.loadtxt(ltfile, unpack=True, dtype='str')
    else:
        files = glob.glob(ltfile)
    ltc = cls.create_from_fits(files[0])
    for f in files[1:]:
        ltc.load_ltfile(f)
    return ltc
|
Create a livetime cube from a single file or list of
files.
|
entailment
|
def create_empty(cls, tstart, tstop, fill=0.0, nside=64):
    """Create an empty livetime cube.

    Parameters
    ----------
    tstart, tstop : float
        Start/stop times passed through to the constructor.
    fill : float
        Value used to fill every (cos-theta, pixel) bin.
    nside : int
        HEALPix NSIDE parameter of the map.
    """
    # 40 uniform bins in cos(theta) on [0, 1]
    cth_edges = np.linspace(0, 1.0, 41)
    # NOTE: the original computed the per-bin solid angle here
    # (edge widths * 2*pi) into an unused local; that dead code
    # has been removed.
    hpx = HPX(nside, True, 'CEL', ebins=cth_edges)
    data = np.ones((len(cth_edges) - 1, hpx.npix)) * fill
    return cls(data, hpx, cth_edges, tstart=tstart, tstop=tstop)
|
Create an empty livetime cube.
|
entailment
|
def get_skydir_lthist(self, skydir, cth_bins):
    """Get the livetime distribution (observing profile) for a given sky
    direction with binning in incidence angle defined by
    ``cth_bins``.

    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Sky coordinate for which the observing profile will be
        computed.
    cth_bins : `~numpy.ndarray`
        Bin edges in cosine of the incidence angle.
    """
    # Each input bin is split into npts sub-bins; the sub-bin
    # histograms are summed back together at the end.
    npts = 1
    sub_bins = utils.split_bin_edges(cth_bins, npts)
    # NOTE(review): the sub-bin centers/widths are computed but not
    # used below (kept for parity with the original implementation).
    sub_ctr = edge_to_center(sub_bins)
    sub_wid = edge_to_width(sub_bins)
    # HEALPix pixel containing the requested direction
    ipix = hp.ang2pix(self.hpx.nside,
                      np.pi / 2. - np.radians(skydir.dec.deg),
                      np.radians(skydir.ra.deg),
                      nest=self.hpx.nest)
    # Histogram the livetime of that pixel in cos(theta)
    hist = np.histogram(self._cth_center,
                        weights=self.data[:, ipix], bins=sub_bins)[0]
    return np.sum(hist.reshape(-1, npts), axis=1)
|
Get the livetime distribution (observing profile) for a given sky
direction with binning in incidence angle defined by
``cth_bins``.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky coordinate for which the observing profile will be
computed.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle.
|
entailment
|
def create_skydir_ltcube(self, skydir, tab_sc, tab_gti, zmax):
    """Create a new livetime cube by scaling this one by the
    observing profile ratio in the direction ``skydir``. This
    method can be used to generate an approximate livetime cube
    that is accurate in the vicinity of ``skydir``.
    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
    tab_sc : `~astropy.table.Table`
        Spacecraft (FT2) table.
    tab_gti : `~astropy.table.Table`
        Table of GTIs.
    zmax : float
        Zenith angle cut.
    """
    # Wrap the single direction in a length-1 SkyCoord array so the
    # histogram filler returns arrays of the right shape
    skydir = SkyCoord(np.array([skydir.ra.deg]),
                      np.array([skydir.dec.deg]), unit='deg')
    lt, lt_wt = fill_livetime_hist(skydir, tab_sc, tab_gti, zmax,
                                   self.costh_edges)
    ipix = self.hpx.skydir_to_pixel(skydir)
    # Ratio of the new observing profile to the current one; bins
    # with zero livetime in this cube keep a scale factor of 1
    lt_scale = np.ones_like(lt)
    lt_wt_scale = np.ones_like(lt_wt)
    m = self.data[:, ipix] > 0.0
    lt_scale[m] = lt[m] / self.data[:, ipix][m]
    lt_wt_scale[m] = lt_wt[m] / self._data_wt[:, ipix][m]
    # Apply the direction-dependent scaling to the whole cube
    data = self.data * lt_scale
    data_wt = self._data_wt * lt_wt_scale
    return LTCube(data, copy.deepcopy(self.hpx), self.costh_edges,
                  # tstart=np.min(tab_gti_t0),
                  # tstop=np.max(tab_gti_t1),
                  zmax=zmax, data_wt=data_wt)
|
Create a new livetime cube by scaling this one by the
observing profile ratio in the direction ``skydir``. This
method can be used to generate an approximate livetime cube
that is accurate in the vicinity of ``skydir``.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
tab_sc : `~astropy.table.Table`
Spacecraft (FT2) table.
tab_gti : `~astropy.table.Table`
Table of GTIs.
zmax : float
Zenith angle cut.
|
entailment
|
def write(self, outfile):
    """Write the livetime cube to a FITS file."""
    hdus = [fits.PrimaryHDU()]

    exp_hdu = self._create_exp_hdu(self.data)
    exp_hdu.name = 'EXPOSURE'
    hdus.append(exp_hdu)

    wexp_hdu = self._create_exp_hdu(self._data_wt)
    wexp_hdu.name = 'WEIGHTED_EXPOSURE'
    hdus.append(wexp_hdu)

    # cos(theta) bin bounds are stored in descending order
    bounds_tab = Table([Column(name='CTHETA_MIN', dtype='f4',
                               data=self.costh_edges[:-1][::-1]),
                        Column(name='CTHETA_MAX', dtype='f4',
                               data=self.costh_edges[1:][::-1])])
    bounds_hdu = fits.table_to_hdu(bounds_tab)
    bounds_hdu.name = 'CTHETABOUNDS'
    hdus.append(bounds_hdu)

    gti_hdu = fits.table_to_hdu(self._tab_gti)
    gti_hdu.name = 'GTI'
    hdus.append(gti_hdu)

    # Stamp the time range on every extension
    for hdu in hdus:
        hdu.header['TSTART'] = self.tstart
        hdu.header['TSTOP'] = self.tstop

    with fits.HDUList(hdus) as hdulist:
        hdulist.writeto(outfile, clobber=True)
|
Write the livetime cube to a FITS file.
|
entailment
|
def main():
    """Main function for command line usage.

    Stacks a set of single-energy-plane HEALPix maps into one map
    cube, optionally gzips the output, and optionally removes the
    input files.
    """
    usage = "usage: %(prog)s [options] "
    description = "Merge a set of Fermi-LAT files."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('-o', '--output', default=None, type=str,
                        help='Output file.')
    parser.add_argument('--clobber', default=False, action='store_true',
                        help='Overwrite output file.')
    parser.add_argument('--hdu', default=None, type=str,
                        help='HDU name.')
    parser.add_argument('--gzip', action='store_true',
                        help='Compress output file')
    parser.add_argument('--rm', action='store_true',
                        help='Remove input files.')
    parser.add_argument('files', nargs='+', default=None,
                        help='List of input files.')
    args = parser.parse_args()
    hpx_map = merge_utils.stack_energy_planes_hpx(args.files, hdu=args.hdu)
    if args.output:
        hpx_map.hpx.write_fits(hpx_map.counts, args.output,
                               extname=args.hdu, clobber=args.clobber)
        if args.gzip:
            os.system('gzip -9 %s' % args.output)
    if args.rm:
        # Input arguments may themselves be glob patterns
        for farg in args.files:
            flist = glob.glob(farg)
            for ffound in flist:
                # BUG FIX: os.path has no unlink(); use os.unlink
                os.unlink(ffound)
|
Main function for command line usage
|
entailment
|
def make_cube_slice(map_in, loge_bounds):
    """Extract a slice from a map cube object.
    """
    # FIXME: This functionality should be moved into a slice method of
    # gammapy.maps
    energy_axis = map_in.geom.axes[0]
    # Convert the log10(E) bounds into edge indices on the energy axis
    idx_lo = utils.val_to_edge(energy_axis.edges, 10**loge_bounds[0])[0]
    idx_hi = utils.val_to_edge(energy_axis.edges, 10**loge_bounds[1])[0]
    sliced_axis = map_in.geom.axes[0].slice(slice(idx_lo, idx_hi))
    # Rebuild the cube geometry with the sliced energy axis
    new_geom = map_in.geom.to_image().to_cube([sliced_axis])
    return WcsNDMap(new_geom, map_in.data[slice(idx_lo, idx_hi), ...].copy())
|
Extract a slice from a map cube object.
|
entailment
|
def plot_sed(sed, showlnl=False, **kwargs):
    """Render a plot of a spectral energy distribution.

    Parameters
    ----------
    showlnl : bool
        Overlay a map of the delta-loglikelihood values vs. flux
        in each energy bin.
    cmap : str
        Colormap that will be used for the delta-loglikelihood
        map.
    llhcut : float
        Minimum delta-loglikelihood value.
    ul_ts_threshold : float
        TS threshold that determines whether the MLE or UL
        is plotted in each energy bin.
    """
    ax = kwargs.pop('ax', plt.gca())
    # NOTE: 'cmap' stays in kwargs and is consumed by the downstream
    # plotters; the original's unused local lookup was removed.
    annotate_name(sed, ax=ax)
    SEDPlotter.plot_flux_points(sed, **kwargs)
    # Only overlay the model when at least one bin is significant
    if np.any(sed['ts'] > 9.):
        if 'model_flux' in sed:
            SEDPlotter.plot_model(sed['model_flux'],
                                  noband=showlnl, **kwargs)
    if showlnl:
        SEDPlotter.plot_lnlscan(sed, **kwargs)
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.set_xlabel('Energy [MeV]')
    ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
|
Render a plot of a spectral energy distribution.
Parameters
----------
showlnl : bool
Overlay a map of the delta-loglikelihood values vs. flux
in each energy bin.
cmap : str
Colormap that will be used for the delta-loglikelihood
map.
llhcut : float
Minimum delta-loglikelihood value.
ul_ts_threshold : float
TS threshold that determines whether the MLE or UL
is plotted in each energy bin.
|
entailment
|
def run(self, gta, mcube_map, **kwargs):
    """Make all plots.

    Generates ROI model/counts maps for the full energy range and for
    each configured energy sub-range, then the counts spectrum plot.
    """
    prefix = kwargs.get('prefix', 'test')
    # Renamed from 'format' to avoid shadowing the builtin
    fmt = kwargs.get('format', self.config['format'])
    # None means the full energy range
    loge_bounds = [None] + self.config['loge_bounds']
    for x in loge_bounds:
        self.make_roi_plots(gta, mcube_map, loge_bounds=x,
                            **kwargs)
    imfile = utils.format_filename(self.config['fileio']['workdir'],
                                   'counts_spectrum', prefix=[prefix],
                                   extension=fmt)
    make_counts_spectrum_plot(gta._roi_data, gta.roi,
                              gta.log_energies,
                              imfile, **kwargs)
|
Make all plots.
|
entailment
|
def make_residmap_plots(self, maps, roi=None, **kwargs):
    """Make plots from the output of
    `~fermipy.gtanalysis.GTAnalysis.residmap`.
    Parameters
    ----------
    maps : dict
        Output dictionary of
        `~fermipy.gtanalysis.GTAnalysis.residmap`.
    roi : `~fermipy.roi_model.ROIModel`
        ROI Model object. Generate markers at the positions of
        the sources in this ROI.
    zoom : float
        Crop the image by this factor. If None then no crop is
        applied.
    """
    fmt = kwargs.get('format', self.config['format'])
    figsize = kwargs.get('figsize', self.config['figsize'])
    workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
    use_weights = kwargs.pop('use_weights', False)
    # FIXME, how to set this:
    no_contour = False
    zoom = kwargs.get('zoom', None)
    kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
    kwargs.setdefault('label_ts_threshold',
                      self.config['label_ts_threshold'])
    cmap = kwargs.setdefault('cmap', self.config['cmap'])
    cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid'])
    kwargs.setdefault('catalogs', self.config['catalogs'])
    if no_contour:
        sigma_levels = None
    else:
        # Contour levels in units of significance
        sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17))
    load_bluered_cmap()
    prefix = maps['name']
    # NOTE(review): 'mask' is assigned here but never used below
    mask = maps['mask']
    if use_weights:
        # Collect significance values only inside the mask, then
        # zero out the masked-off region of every map
        sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype(
            bool)]
        maps['sigma'].data *= maps['mask'].data
        maps['data'].data *= maps['mask'].data
        maps['model'].data *= maps['mask'].data
        maps['excess'].data *= maps['mask'].data
    else:
        sigma_hist_data = maps['sigma'].data
    # 2D significance map
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['sigma'], roi=roi, **kwargs)
    p.plot(vmin=-5, vmax=5, levels=sigma_levels,
           cb_label='Significance [$\sigma$]', interpolation='bicubic',
           cmap=cmap_resid, zoom=zoom)
    plt.savefig(utils.format_filename(workdir,
                                      'residmap_sigma',
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    # make and draw histogram
    fig, ax = plt.subplots(figsize=figsize)
    nBins = np.linspace(-6, 6, 121)
    data = np.nan_to_num(sigma_hist_data)
    # find best fit parameters
    mu, sigma = norm.fit(data.flatten())
    # make and draw the histogram; clip tails into the outermost bins
    data[data > 6.0] = 6.0
    data[data < -6.0] = -6.0
    n, bins, patches = ax.hist(data.flatten(), nBins, density=True,
                               histtype='stepfilled',
                               facecolor='green', alpha=0.75)
    # make and draw best fit line
    y = norm.pdf(bins, mu, sigma)
    ax.plot(bins, y, 'r--', linewidth=2)
    # Reference standard-normal curve for comparison
    y = norm.pdf(bins, 0.0, 1.0)
    ax.plot(bins, y, 'k', linewidth=1)
    # labels and such
    ax.set_xlabel(r'Significance ($\sigma$)')
    ax.set_ylabel('Probability')
    paramtext = 'Gaussian fit:\n'
    paramtext += '$\\mu=%.2f$\n' % mu
    paramtext += '$\\sigma=%.2f$' % sigma
    ax.text(0.05, 0.95, paramtext, verticalalignment='top',
            horizontalalignment='left', transform=ax.transAxes)
    plt.savefig(utils.format_filename(workdir,
                                      'residmap_sigma_hist',
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    # Shared color scale for the data and model count maps
    vmax = max(np.max(maps['data'].data), np.max(maps['model'].data))
    vmin = min(np.min(maps['data'].data), np.min(maps['model'].data))
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['data'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic',
           cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
    plt.savefig(utils.format_filename(workdir,
                                      'residmap_data',
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['model'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic',
           cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
    plt.savefig(utils.format_filename(workdir,
                                      'residmap_model',
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    # Excess (data - model) map on the residual colormap
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['excess'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic',
           cmap=cmap_resid)
    plt.savefig(utils.format_filename(workdir,
                                      'residmap_excess',
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
|
Make plots from the output of
`~fermipy.gtanalysis.GTAnalysis.residmap`.
Parameters
----------
maps : dict
Output dictionary of
`~fermipy.gtanalysis.GTAnalysis.residmap`.
roi : `~fermipy.roi_model.ROIModel`
ROI Model object. Generate markers at the positions of
the sources in this ROI.
zoom : float
Crop the image by this factor. If None then no crop is
applied.
|
entailment
|
def make_tsmap_plots(self, maps, roi=None, **kwargs):
    """Make plots from the output of
    `~fermipy.gtanalysis.GTAnalysis.tsmap` or
    `~fermipy.gtanalysis.GTAnalysis.tscube`. This method
    generates a 2D sky map for the best-fit test source in
    sqrt(TS) and Npred.
    Parameters
    ----------
    maps : dict
        Output dictionary of
        `~fermipy.gtanalysis.GTAnalysis.tsmap` or
        `~fermipy.gtanalysis.GTAnalysis.tscube`.
    roi : `~fermipy.roi_model.ROIModel`
        ROI Model object. Generate markers at the positions of
        the sources in this ROI.
    zoom : float
        Crop the image by this factor. If None then no crop is
        applied.
    """
    kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
    kwargs.setdefault('label_ts_threshold',
                      self.config['label_ts_threshold'])
    kwargs.setdefault('cmap', self.config['cmap'])
    kwargs.setdefault('catalogs', self.config['catalogs'])
    fmt = kwargs.get('format', self.config['format'])
    figsize = kwargs.get('figsize', self.config['figsize'])
    workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
    suffix = kwargs.pop('suffix', 'tsmap')
    zoom = kwargs.pop('zoom', None)
    # Nothing to plot if the TS map is missing
    if 'ts' not in maps:
        return
    # Contour levels in units of significance
    sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))
    prefix = maps['name']
    # sqrt(TS) sky map
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['sqrt_ts'], roi=roi, **kwargs)
    p.plot(vmin=0, vmax=5, levels=sigma_levels,
           cb_label='Sqrt(TS) [$\sigma$]', interpolation='bicubic',
           zoom=zoom)
    plt.savefig(utils.format_filename(workdir,
                                      '%s_sqrt_ts' % suffix,
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    # Npred sky map
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['npred'], roi=roi, **kwargs)
    p.plot(vmin=0, cb_label='NPred [Counts]', interpolation='bicubic',
           zoom=zoom)
    plt.savefig(utils.format_filename(workdir,
                                      '%s_npred' % suffix,
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
    # make and draw histogram
    fig, ax = plt.subplots(figsize=figsize)
    bins = np.linspace(0, 25, 101)
    data = np.nan_to_num(maps['ts'].data.T)
    # Clip TS values into the histogram range
    data[data > 25.0] = 25.0
    data[data < 0.0] = 0.0
    n, bins, patches = ax.hist(data.flatten(), bins, density=True,
                               histtype='stepfilled',
                               facecolor='green', alpha=0.75)
    # ax.plot(bins,(1-chi2.cdf(x,dof))/2.,**kwargs)
    # Reference chi2 (1 dof) / 2 distribution expected for pure noise
    ax.plot(bins, 0.5 * chi2.pdf(bins, 1.0), color='k',
            label=r"$\chi^2_{1} / 2$")
    ax.set_yscale('log')
    ax.set_ylim(1E-4)
    ax.legend(loc='upper right', frameon=False)
    # labels and such
    ax.set_xlabel('TS')
    ax.set_ylabel('Probability')
    plt.savefig(utils.format_filename(workdir,
                                      '%s_ts_hist' % suffix,
                                      prefix=[prefix],
                                      extension=fmt))
    plt.close(fig)
|
Make plots from the output of
`~fermipy.gtanalysis.GTAnalysis.tsmap` or
`~fermipy.gtanalysis.GTAnalysis.tscube`. This method
generates a 2D sky map for the best-fit test source in
sqrt(TS) and Npred.
Parameters
----------
maps : dict
Output dictionary of
`~fermipy.gtanalysis.GTAnalysis.tsmap` or
`~fermipy.gtanalysis.GTAnalysis.tscube`.
roi : `~fermipy.roi_model.ROIModel`
ROI Model object. Generate markers at the positions of
the sources in this ROI.
zoom : float
Crop the image by this factor. If None then no crop is
applied.
|
entailment
|
def make_roi_plots(self, gta, mcube_tot, **kwargs):
    """Make various diagnostic plots for the 1D and 2D
    counts/model distributions.

    Parameters
    ----------
    gta : `~fermipy.gtanalysis.GTAnalysis`
        Analysis instance providing the counts/weight/diffuse maps.
    mcube_tot : map object
        Total model counts cube.
    prefix : str
        Prefix that will be appended to all filenames.
    """
    fmt = kwargs.get('format', self.config['format'])
    figsize = kwargs.get('figsize', self.config['figsize'])
    prefix = kwargs.get('prefix', '')
    loge_bounds = kwargs.get('loge_bounds', None)
    weighted = kwargs.get('weighted', False)
    roi_kwargs = {}
    roi_kwargs.setdefault('loge_bounds', loge_bounds)
    roi_kwargs.setdefault(
        'graticule_radii', self.config['graticule_radii'])
    roi_kwargs.setdefault('label_ts_threshold',
                          self.config['label_ts_threshold'])
    roi_kwargs.setdefault('cmap', self.config['cmap'])
    roi_kwargs.setdefault('catalogs', self._catalogs)
    if loge_bounds is None:
        loge_bounds = (gta.log_energies[0], gta.log_energies[-1])
    esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
    mcube_diffuse = gta.model_counts_map('diffuse')
    counts_map = gta.counts_map()
    if weighted:
        # Apply likelihood weights; deep-copy first so the caller's
        # maps are not modified in place
        wmap = gta.weight_map()
        counts_map = copy.deepcopy(counts_map)
        mcube_tot = copy.deepcopy(mcube_tot)
        counts_map.data *= wmap.data
        mcube_tot.data *= wmap.data
        mcube_diffuse.data *= wmap.data
    # colors = ['k', 'b', 'g', 'r']
    data_style = {'marker': 's', 'linestyle': 'None'}
    # BUG FIX: the original created an extra plt.figure() here that was
    # never closed; the counts map below was then drawn on it and it
    # leaked on every call.  Each map now gets its own figure.
    if gta.projtype == "WCS":
        xmin = -1
        xmax = 1
    elif gta.projtype == "HPX":
        # Reproject the HEALPix maps onto a WCS grid for plotting
        hpx2wcs = counts_map.make_wcs_mapping(proj='CAR', oversample=2)
        counts_map = counts_map.to_wcs(hpx2wcs=hpx2wcs)
        mcube_tot = mcube_tot.to_wcs(hpx2wcs=hpx2wcs)
        mcube_diffuse = mcube_diffuse.to_wcs(hpx2wcs=hpx2wcs)
        xmin = None
        xmax = None
    # 2D model map
    fig = plt.figure(figsize=figsize)
    rp = ROIPlotter(mcube_tot, roi=gta.roi, **roi_kwargs)
    rp.plot(cb_label='Counts', zscale='pow', gamma=1. / 3.)
    plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                             '%s_model_map%s.%s' % (
                                 prefix, esuffix, fmt)))
    plt.close(fig)
    # 2D counts map
    fig = plt.figure(figsize=figsize)
    rp = ROIPlotter(counts_map, roi=gta.roi, **roi_kwargs)
    rp.plot(cb_label='Counts', zscale='sqrt')
    plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                             '%s_counts_map%s.%s' % (
                                 prefix, esuffix, fmt)))
    plt.close(fig)
    # 1D projections of data, total model, and diffuse along LON/LAT
    for iaxis, xlabel, psuffix in zip([0, 1],
                                      ['LON Offset [deg]', 'LAT Offset [deg]'],
                                      ['xproj', 'yproj']):
        fig = plt.figure(figsize=figsize)
        rp.plot_projection(iaxis, label='Data', color='k',
                           xmin=xmin, xmax=xmax, **data_style)
        rp.plot_projection(iaxis, data=mcube_tot, label='Model', xmin=xmin, xmax=xmax,
                           noerror=True)
        rp.plot_projection(iaxis, data=mcube_diffuse, label='Diffuse', xmin=xmin, xmax=xmax,
                           noerror=True)
        plt.gca().set_ylabel('Counts')
        plt.gca().set_xlabel(xlabel)
        plt.gca().legend(frameon=False)
        annotate(loge_bounds=loge_bounds)
        plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                 '%s_counts_map_%s%s.%s' % (prefix, psuffix,
                                                            esuffix, fmt)))
        plt.close(fig)
|
Make various diagnostic plots for the 1D and 2D
counts/model distributions.
Parameters
----------
prefix : str
Prefix that will be appended to all filenames.
|
entailment
|
def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs):
    """Utility function for generating diagnostic plots for the
    extension analysis.

    Produces LON/LAT projection plots for the combined dataset and
    for each analysis component.
    """
    # format = kwargs.get('format', self.config['plotting']['format'])
    if loge_bounds is None:
        loge_bounds = (self.energies[0], self.energies[-1])
    # Filesystem-safe source name used in the output filenames
    name = src['name'].lower().replace(' ', '_')
    esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
    # Combined (all components) plots; empty suffix selects them
    p = ExtensionPlotter(src, self.roi, '',
                         self.config['fileio']['workdir'],
                         loge_bounds=loge_bounds)
    # LON projection
    fig = plt.figure()
    p.plot(0)
    plt.gca().set_xlim(-2, 2)
    ROIPlotter.setup_projection_axis(0)
    annotate(src=src, loge_bounds=loge_bounds)
    plt.savefig(os.path.join(self.config['fileio']['workdir'],
                             '%s_%s_extension_xproj%s.png' % (
                                 prefix, name, esuffix)))
    plt.close(fig)
    # LAT projection
    fig = plt.figure()
    p.plot(1)
    plt.gca().set_xlim(-2, 2)
    ROIPlotter.setup_projection_axis(1)
    annotate(src=src, loge_bounds=loge_bounds)
    plt.savefig(os.path.join(self.config['fileio']['workdir'],
                             '%s_%s_extension_yproj%s.png' % (
                                 prefix, name, esuffix)))
    plt.close(fig)
    # Per-component plots; suffix '_NN' selects component NN
    for i, c in enumerate(self.components):
        suffix = '_%02i' % i
        p = ExtensionPlotter(src, self.roi, suffix,
                             self.config['fileio']['workdir'],
                             loge_bounds=loge_bounds)
        fig = plt.figure()
        p.plot(0)
        ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.gca().set_xlim(-2, 2)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_xproj%s%s.png' % (
                                     prefix, name, esuffix, suffix)))
        plt.close(fig)
        fig = plt.figure()
        p.plot(1)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_yproj%s%s.png' % (
                                     prefix, name, esuffix, suffix)))
        plt.close(fig)
|
Utility function for generating diagnostic plots for the
extension analysis.
|
entailment
|
def extract_parameters(pil, keys=None):
    """Extract and return parameter names and values from a pil object

    Parameters
    ----------
    pil : `Pil` object
        Object supporting ``keys()`` and item access.
    keys : list
        List of parameter names; if None, extract all parameters.

    Returns
    -------
    out_dict : dict
        Dictionary with parameter name, value pairs.  Parameters
        whose lookup raises ``ValueError`` map to None.
    """
    selected = pil.keys() if keys is None else keys
    out_dict = {}
    for name in selected:
        try:
            value = pil[name]
        except ValueError:
            # Unset/invalid parameter: record it as None
            value = None
        out_dict[name] = value
    return out_dict
|
Extract and return parameter names and values from a pil object
Parameters
----------
pil : `Pil` object
keys : list
List of parameter names, if None, extact all parameters
Returns
-------
out_dict : dict
Dictionary with parameter name, value pairs
|
entailment
|
def update_gtapp(gtapp, **kwargs):
    """Update the parameters of the object that can run ScienceTools applications

    Parameters
    ----------
    gtapp : `GtApp.GtApp`
        Object that will run the application in question
    kwargs : arguments used to invoke the application

    Raises
    ------
    ValueError, KeyError
        If the application rejects a parameter.  The original
        exception is chained (``raise ... from``) for easier
        debugging.
    """
    for key, val in kwargs.items():
        # 'pfiles' and 'scratch' are bookkeeping options, not
        # application parameters
        if key in ['pfiles', 'scratch']:
            continue
        # None means "leave this parameter unset"
        if val is None:
            continue
        try:
            gtapp[key] = val
        except ValueError as exc:
            raise ValueError(
                "gtapp failed to set parameter %s %s" % (key, val)) from exc
        except KeyError as exc:
            raise KeyError(
                "gtapp failed to set parameter %s %s" % (key, val)) from exc
|
Update the parameters of the object that can run ScienceTools applications
Parameters
----------
gtapp : `GtApp.GtApp`
Object that will run the application in question
kwargs : arguments used to invoke the application
|
entailment
|
def _set_pfiles(dry_run, **kwargs):
"""Set the PFILES env var
Parameters
----------
dry_run : bool
Don't actually run
Keyword arguments
-----------------
pfiles : str
Value to set PFILES
Returns
-------
pfiles_orig : str
Current value of PFILES envar
"""
pfiles_orig = os.environ['PFILES']
pfiles = kwargs.get('pfiles', None)
if pfiles:
if dry_run:
print("mkdir %s" % pfiles)
else:
try:
os.makedirs(pfiles)
except OSError:
pass
pfiles = "%s:%s" % (pfiles, pfiles_orig)
os.environ['PFILES'] = pfiles
return pfiles_orig
|
Set the PFILES env var
Parameters
----------
dry_run : bool
Don't actually run
Keyword arguments
-----------------
pfiles : str
Value to set PFILES
Returns
-------
pfiles_orig : str
Current value of PFILES envar
|
entailment
|
def build_gtapp(appname, dry_run, **kwargs):
    """Build an object that can run ScienceTools application

    Parameters
    ----------
    appname : str
        Name of the application (e.g., gtbin)
    dry_run : bool
        Print command but do not run it
    kwargs : arguments used to invoke the application

    Returns
    -------
    `GtApp.GtApp`
        Object that will run the application in question
    """
    # Point PFILES at a writable area while the app object is built
    saved_pfiles = _set_pfiles(dry_run, **kwargs)
    app = GtApp.GtApp(appname)
    update_gtapp(app, **kwargs)
    # Restore the caller's PFILES environment
    _reset_pfiles(saved_pfiles)
    return app
|
Build an object that can run ScienceTools application
Parameters
----------
appname : str
Name of the application (e.g., gtbin)
dry_run : bool
Print command but do not run it
kwargs : arguments used to invoke the application
Returns `GtApp.GtApp` object that will run the application in question
|
entailment
|
def run_gtapp(gtapp, stream, dry_run, **kwargs):
    """Runs one of the ScienceTools apps
    Taken from fermipy.gtanalysis.run_gtapp by Matt Wood

    Parameters
    ----------
    gtapp : `GtApp.GtApp` object
        The application (e.g., gtbin)
    stream : stream object
        Must have 'write' function; None means ``sys.stdout``
    dry_run : bool
        Print command but do not run it
    kwargs : arguments used to invoke the application

    Returns
    -------
    int
        0 on success, -1 if the application raised an exception.
    """
    if stream is None:
        stream = sys.stdout
    pfiles_orig = _set_pfiles(dry_run, **kwargs)
    update_gtapp(gtapp, **kwargs)
    # Echo the command that is about to run
    stream.write("%s\n" % gtapp.command())
    stream.flush()
    if dry_run:
        _reset_pfiles(pfiles_orig)
        return 0
    try:
        stdin, stdout = gtapp.runWithOutput(print_command=False)
        for line in stdout:
            stream.write(line.strip())
        stream.flush()
        return_code = 0
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt and SystemExit
        stream.write('Exited with exit code -1\n')
        return_code = -1
    _reset_pfiles(pfiles_orig)
    return return_code
|
    Runs one of the ScienceTools apps
Taken from fermipy.gtanalysis.run_gtapp by Matt Wood
Parameters
----------
gtapp : `GtApp.GtApp` object
The application (e.g., gtbin)
stream : stream object
Must have 'write' function
dry_run : bool
Print command but do not run it
kwargs : arguments used to invoke the application
|
entailment
|
def update_args(self, override_args):
    """Update the argument used to invoke the application

    See help for `chain.Link` for details.
    This calls the base class function then fills the parameters of
    the GtApp object.
    """
    Link.update_args(self, override_args)
    dry_run = override_args.get('dry_run', False)
    if self.__app is not None:
        # Application object already built; just refresh its parameters
        update_gtapp(self.__app, **self.args)
    else:
        # Lazily construct the application on first use
        self.__app = build_gtapp(self.appname, dry_run, **self.args)
|
Update the argument used to invoke the application
See help for `chain.Link` for details
This calls the base class function then fills the parameters of the GtApp object
|
entailment
|
def run_command(self, stream=None, dry_run=False):
    """Runs the command for this link. This method can be overridden by
    sub-classes to invoke a different command

    Parameters
    -----------
    stream : `file` or None
        Must have 'write' function.  None (the default) selects the
        current ``sys.stdout`` at call time (resolved inside
        `run_gtapp`), instead of binding ``sys.stdout`` at import
        time as the previous default did.
    dry_run : bool
        Print command but do not run it
    """
    return run_gtapp(self.__app, stream, dry_run, **self.args)
|
Runs the command for this link. This method can be overridden by
sub-classes to invoke a different command
Parameters
-----------
stream : `file`
Must have 'write' function
dry_run : bool
Print command but do not run it
|
entailment
|
def command_template(self):
    """Build and return a template string for invoking this application
    from the command line.

    The concrete command is obtained with
    ``self.command_template().format(**self.args)``: arguments listed in
    `self._options` become ``key={key}`` placeholders, while all other
    arguments are frozen to their current values.
    """
    tokens = [self.appname]
    for key, val in self.args.items():
        if key in self._options:
            # substitutable option -> leave a format placeholder
            tokens.append('%s={%s}' % (key, key))
        else:
            # fixed argument -> bake in the current value
            tokens.append('%s=%s' % (key, val))
    return ' '.join(tokens)
|
Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtained by using
`self.command_template().format(**self.args)`
|
entailment
|
def run_analysis(self, argv):
    """ Build the manifest for all the models

    Parses the command line, builds the model library, then for every
    model listed in the models yaml file writes a source-map manifest
    and a fermipy configuration file.
    """
    args = self._parser.parse_args(argv)
    # binning components (energy bins / event types) for this analysis
    components = Component.build_from_yamlfile(args.comp)
    NAME_FACTORY.update_base_dict(args.data)
    # build the model library and extract the manager that owns it
    model_dict = make_library(**args.__dict__)
    model_manager = model_dict['ModelManager']
    models = load_yaml(args.models)
    data = args.data
    hpx_order = args.hpx_order
    for modelkey in models:
        # one srcmap manifest and one fermipy config per model
        model_manager.make_srcmap_manifest(modelkey, components, data)
        model_manager.make_fermipy_config_yaml(modelkey, components, data,
                                               hpx_order=hpx_order,
                                               irf_ver=NAME_FACTORY.irf_ver())
|
Build the manifest for all the models
|
entailment
|
def copy_ccube(ccube, outsrcmap, hpx_order):
    """Copy a counts cube into outsrcmap file
    reducing the HEALPix order to hpx_order if needed.

    Parameters
    ----------
    ccube : str
        Path to the input counts cube file (a '.gz' version is tried
        if the plain file is not found).
    outsrcmap : str
        Path to the output source map file.
    hpx_order : int
        Maximum HEALPix order for the output map.

    Returns
    -------
    int or None
        hpx_order if the map was down-graded before writing, or None
        if the input file was copied unchanged.
    """
    sys.stdout.write("  Copying counts cube from %s to %s\n" % (ccube, outsrcmap))
    try:
        hdulist_in = fits.open(ccube)
    except IOError:
        # fall back to a gzipped version of the file
        hdulist_in = fits.open("%s.gz" % ccube)
    hpx_order_in = hdulist_in[1].header['ORDER']
    if hpx_order_in > hpx_order:
        # down-grade the map to the requested order before writing
        hpxmap = HpxMap.create_from_hdulist(hdulist_in)
        hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
        hpxlist_out = hdulist_in
        hpxlist_out[1] = hpxmap_out.create_image_hdu()
        hpxlist_out[1].name = 'SKYMAP'
        hpxlist_out.writeto(outsrcmap)
        # close the input handle now that the output has been written
        hdulist_in.close()
        return hpx_order
    # input is already coarse enough; copy the file unchanged
    hdulist_in.close()
    # NOTE(review): if only the '.gz' file exists this 'cp' fails
    # silently (matching the original behaviour) -- consider copying
    # the '.gz' file and gunzipping instead.
    os.system('cp %s %s' % (ccube, outsrcmap))
    return None
|
Copy a counts cube into outsrcmap file
reducing the HEALPix order to hpx_order if needed.
|
entailment
|
def append_hdus(hdulist, srcmap_file, source_names, hpx_order):
    """Append HEALPix maps to a list

    Parameters
    ----------
    hdulist : list
        The list being appended to
    srcmap_file : str
        Path to the file containing the HDUs
    source_names : list of str
        Names of the sources to extract from srcmap_file
    hpx_order : int
        Maximum order for maps; if None the HDUs are copied unchanged,
        otherwise maps finer than this order are down-graded first
    """
    sys.stdout.write("  Extracting %i sources from %s" % (len(source_names), srcmap_file))
    try:
        hdulist_in = fits.open(srcmap_file)
    except IOError:
        try:
            # fall back to a gzipped version of the file
            hdulist_in = fits.open('%s.gz' % srcmap_file)
        except IOError:
            # best-effort: skip missing files rather than aborting
            sys.stdout.write("  Missing file %s\n" % srcmap_file)
            return
    for source_name in source_names:
        # one dot per source as a progress indicator
        sys.stdout.write('.')
        sys.stdout.flush()
        if hpx_order is None:
            # copy the HDU through unchanged
            hdulist.append(hdulist_in[source_name])
        else:
            try:
                hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)
            except IndexError:
                print("  Index error on source %s in file %s" % (source_name, srcmap_file))
                continue
            except KeyError:
                print("  Key error on source %s in file %s" % (source_name, srcmap_file))
                continue
            # down-grade to the requested maximum order
            hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
            hdulist.append(hpxmap_out.create_image_hdu(name=source_name))
    sys.stdout.write("\n")
    # flush the appended HDUs to disk before closing the input file
    hdulist.flush()
    hdulist_in.close()
|
Append HEALPix maps to a list
Parameters
----------
hdulist : list
The list being appended to
srcmap_file : str
Path to the file containing the HDUs
source_names : list of str
Names of the sources to extract from srcmap_file
hpx_order : int
Maximum order for maps
|
entailment
|
def assemble_component(compname, compinfo, hpx_order):
    """Assemble the source map file for one binning component.

    Parameters
    ----------
    compname : str
        The key for this component (e.g., E0_PSF3)
    compinfo : dict
        Information about this component: the 'ccube', 'outsrcmap' and
        'source_dict' entries are used.
    hpx_order : int
        Maximum order for maps
    """
    sys.stdout.write("Working on component %s\n" % compname)
    # copy the counts cube first; this may reduce the working order
    hpx_order = AssembleModel.copy_ccube(compinfo['ccube'],
                                         compinfo['outsrcmap'],
                                         hpx_order)
    hdulist = AssembleModel.open_outsrcmap(compinfo['outsrcmap'])
    # append the source maps, one group of sources per input file
    for _, srcinfo in sorted(compinfo['source_dict'].items()):
        AssembleModel.append_hdus(hdulist, srcinfo['srcmap_file'],
                                  srcinfo['source_names'], hpx_order)
    sys.stdout.write("Done!\n")
|
Assemble the source map file for one binning component
Parameters
----------
compname : str
The key for this component (e.g., E0_PSF3)
compinfo : dict
Information about this component
hpx_order : int
Maximum order for maps
|
entailment
|
def run_analysis(self, argv):
    """Assemble the source map file for one binning component.

    Parses argv for the manifest yaml file, the component name and the
    maximum HEALPix order, then delegates to `assemble_component`.
    """
    args = self._parser.parse_args(argv)
    # use a context manager so the manifest file is closed promptly
    with open(args.input) as fin:
        manifest = yaml.safe_load(fin)
    compname = args.compname
    value = manifest[compname]
    self.assemble_component(compname, value, args.hpx_order)
|
Assemble the source map file for one binning component
FIXME
|
entailment
|
def build_job_configs(self, args):
    """Hook to build job configurations

    Returns one job configuration per (model, binning component) pair,
    keyed by '<modelkey>_<component key>'.
    """
    job_configs = {}
    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])
    models = load_yaml(args['models'])
    for modelkey in models:
        # manifest produced earlier by the init-model step
        manifest = os.path.join('analysis', 'model_%s' % modelkey,
                                'srcmap_manifest_%s.yaml' % modelkey)
        for comp in components:
            key = comp.make_key('{ebin_name}_{evtype_name}')
            fullkey = "%s_%s" % (modelkey, key)
            outfile = NAME_FACTORY.merged_srcmaps(modelkey=modelkey,
                                                  component=key,
                                                  coordsys=comp.coordsys,
                                                  mktime='none',
                                                  irf_ver=NAME_FACTORY.irf_ver())
            # log file lives next to the output, on an NFS-visible path
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            job_configs[fullkey] = dict(input=manifest,
                                        compname=key,
                                        logfile=logfile)
    return job_configs
|
Hook to build job configurations
|
entailment
|
def _map_arguments(self, input_dict):
    """Map from the top-level arguments to the arguments provided to
    the individual links in this chain."""
    get = input_dict.get
    data = get('data')
    comp = get('comp')
    library = get('library')
    models = get('models')
    hpx_order = get('hpx_order_fitting')
    dry_run = get('dry_run', False)
    # first build the models and the srcmap manifests ...
    self._set_link('init-model', InitModel,
                   comp=comp, data=data,
                   library=library,
                   models=models,
                   hpx_order=hpx_order,
                   dry_run=dry_run)
    # ... then assemble the source maps for each component
    self._set_link('assemble-model', AssembleModel_SG,
                   comp=comp, data=data,
                   models=models)
|
Map from the top-level arguments to the arguments provided to
the individual links
|
entailment
|
def run_analysis(self, argv):
    """Run this analysis.

    Sets up a fermipy `GTAnalysis`, performs the baseline ROI fit,
    localizes sources, searches for new sources, optimizes and fits the
    ROI, then writes the baseline ROI file.
    """
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")
    # raw string for the regex avoids an invalid '\.' escape sequence
    gta = GTAnalysis(args.config, logging={'verbosity': 3},
                     fileio={'workdir_regex': r'\.xml$|\.npy$'})
    gta.setup(overwrite=False)
    # initial fit of the highest-Npred sources
    baseline_roi_fit(gta, make_plots=args.make_plots,
                     minmax_npred=[1e3, np.inf])
    localize_sources(gta, nstep=5, dtheta_max=0.5, update=True,
                     prefix='base', make_plots=args.make_plots)
    # look for new point sources above sqrt(TS) = 5
    gta.find_sources(sqrt_ts_threshold=5.0, search_skydir=gta.roi.skydir,
                     search_minmax_radius=[1.0, np.nan])
    gta.optimize()
    gta.print_roi()
    gta.print_params()
    # free normalizations of nearby sources for the final fit
    gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')
    gta.fit(covar=True)
    gta.print_roi()
    gta.print_params()
    gta.write_roi(args.roi_baseline, make_plots=args.make_plots)
|
Run this analysis
|
entailment
|
def run_analysis(self, argv):
    """Run this analysis.

    For each profile (and optionally each sky direction) this adds a
    test source to the baseline ROI, frees the correlated sources,
    extracts an SED for the test source, then restores the ROI.
    """
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")
    if is_null(args.skydirs):
        skydir_dict = None
    else:
        skydir_dict = load_yaml(args.skydirs)
    # raw string for the regex avoids an invalid '\.' escape sequence
    gta = GTAnalysis(args.config,
                     logging={'verbosity': 3},
                     fileio={'workdir_regex': r'\.xml$|\.npy$'})
    gta.load_roi(args.roi_baseline)
    gta.print_roi()
    basedir = os.path.dirname(args.config)
    # This should be a no-op, b/c it was done in the baseline analysis
    for profile in args.profiles:
        if skydir_dict is None:
            skydir_keys = [None]
        else:
            skydir_keys = sorted(skydir_dict.keys())
        for skydir_key in skydir_keys:
            if skydir_key is None:
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
            else:
                # override the profile position with this sky direction
                skydir_val = skydir_dict[skydir_key]
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
                pdict['ra'] = skydir_val['ra']
                pdict['dec'] = skydir_val['dec']
                pkey += "_%06i" % skydir_key
            outfile = "sed_%s.fits" % pkey
            # Add the source and get the list of correlated sources
            correl_dict, test_src_name = add_source_get_correlated(gta, psrc_name,
                                                                   pdict, correl_thresh=0.25,
                                                                   non_null_src=args.non_null_src)
            # Write the list of correlated sources
            correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
            write_yaml(correl_dict, correl_yaml)
            gta.free_sources(False)
            for src_name in correl_dict.keys():
                gta.free_source(src_name, pars='norm')
            # build the SED
            if args.non_null_src:
                gta.update_source(test_src_name, reoptimize=True)
                gta.write_roi("base_%s" % pkey, make_plots=False)
            gta.sed(test_src_name, prefix=pkey, outfile=outfile, make_plots=args.make_plots)
            # remove the source
            gta.delete_source(test_src_name)
            # put the ROI back to how it was
            gta.load_xml(args.roi_baseline)
    return gta
|
Run this analysis
|
entailment
|
def build_job_configs(self, args):
    """Hook to build job configurations

    Builds one job per target in the targets yaml file, pointing each
    job at the per-target fermipy configuration file.
    """
    job_configs = {}
    ttype = args['ttype']
    (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
    # this scatter-gather link only runs on real data
    if sim is not None:
        raise ValueError("Found 'sim' argument on AnalyzeROI_SG config.")
    if targets_yaml is None:
        return job_configs
    config_yaml = 'config.yaml'
    config_override = args.get('config')
    if is_not_null(config_override):
        config_yaml = config_override
    targets = load_yaml(targets_yaml)
    # arguments shared by every job
    base_config = dict(roi_baseline=args['roi_baseline'],
                       make_plots=args['make_plots'])
    for target_name in targets.keys():
        name_keys = dict(target_type=ttype,
                         target_name=target_name,
                         fullpath=True)
        target_dir = NAME_FACTORY.targetdir(**name_keys)
        config_path = os.path.join(target_dir, config_yaml)
        # log file lives in the target directory, on an NFS-visible path
        logfile = make_nfs_path(os.path.join(
            target_dir, "%s_%s.log" % (self.linkname, target_name)))
        job_config = base_config.copy()
        job_config.update(dict(config=config_path,
                               logfile=logfile))
        job_configs[target_name] = job_config
    return job_configs
|
Hook to build job configurations
|
entailment
|
def read_data(self, size):
    """Receive data from the device.

    If the read fails for any reason, an :obj:`IOError` exception
    is raised.

    :param size: the number of bytes to read.
    :type size: int
    :return: the data received.
    :rtype: list(int)
    """
    result = list()
    # reads are done in chunks of up to 8 bytes via a reusable buffer
    data = ctypes.create_string_buffer(8)
    while size > 0:
        length = min(size, 8)
        # 100 ms timeout per chunk; n is the number of bytes received
        n = hidapi.hid_read_timeout(self.device, data, length, 100)
        if n <= 0:
            raise IOError(
                'pywws.device_ctypes_hidapi.USBDevice.read_data failed')
        for i in range(n):
            # indexing a ctypes string buffer yields 1-byte strings;
            # ord() converts to int on both Python 2 and Python 3
            result.append(ord(data[i]))
        size -= n
    return result
|
Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
|
entailment
|
def write_data(self, buf):
    """Send data to the device.

    :param buf: the data to send.
    :type buf: list(int)
    :return: success status.
    :rtype: bool
    """
    # NOTE(review): joining with chr() produces a text string; passing
    # it to ctypes.c_char_p assumes Python 2 byte strings -- confirm
    # this module is only used under Python 2, or encode to bytes.
    data = ''.join(map(chr, buf))
    size = len(data)
    if hidapi.hid_write(self.device, ctypes.c_char_p(data), size) != size:
        raise IOError(
            'pywws.device_ctypes_hidapi.USBDevice.write_data failed')
    return True
|
Send data to the device.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
|
entailment
|
def zambretti_code(params, hourly_data):
    """Simple implementation of the Zambretti forecaster algorithm.

    Inspired by beteljuice.com Java algorithm, as converted to Python by
    honeysucklecottage.me.uk, and further information
    from http://www.meteormetrics.com/zambretti.htm"""
    north = literal_eval(params.get('Zambretti', 'north', 'True'))
    baro_upper = float(params.get('Zambretti', 'baro upper', '1050.0'))
    baro_lower = float(params.get('Zambretti', 'baro lower', '950.0'))
    if not hourly_data['rel_pressure']:
        return ''
    # ignore wind direction in very light winds
    if hourly_data['wind_ave'] is None or hourly_data['wind_ave'] < 0.3:
        wind = None
    else:
        wind = hourly_data['wind_dir']
    # pressure trend in hPa / hour
    if hourly_data['pressure_trend'] is None:
        trend = 0.0
    else:
        trend = hourly_data['pressure_trend'] / 3.0
    # normalise pressure to the 950-1050 hPa reference range
    baro = 950.0 + (
        (1050.0 - 950.0) * (hourly_data['rel_pressure'] - baro_lower) /
        (baro_upper - baro_lower))
    # adjust normalised pressure for wind direction
    if wind is not None:
        if not isinstance(wind, int):
            wind = int(wind + 0.5) % 16
        if not north:
            # southern hemisphere, so add 180 degrees
            wind = (wind + 8) % 16
        baro += (5.2, 4.2, 3.2, 1.05, -1.1, -3.15, -5.2, -8.35,
                 -11.5, -9.4, -7.3, -5.25, -3.2, -1.15, 0.9, 3.05)[wind]
    # summer in the forecasting hemisphere?
    summer = north == (4 <= hourly_data['idx'].month <= 9)
    # compute base forecast from pressure and trend
    if trend >= 0.1:
        # rising pressure
        if summer:
            baro += 3.2
        zone = 0.1740 * (1031.40 - baro)
        letters = ('A', 'B', 'B', 'C', 'F', 'G', 'I', 'J', 'L', 'M', 'M',
                   'Q', 'T', 'Y')
    elif trend <= -0.1:
        # falling pressure
        if summer:
            baro -= 3.2
        zone = 0.1553 * (1029.95 - baro)
        letters = ('B', 'D', 'H', 'O', 'R', 'U', 'V', 'X', 'X', 'Z')
    else:
        # steady
        zone = 0.2314 * (1030.81 - baro)
        letters = ('A', 'B', 'B', 'B', 'E', 'K', 'N', 'N', 'P', 'P', 'S',
                   'W', 'W', 'X', 'X', 'X', 'Z')
    # round, clip to the lookup table range, and convert to letter code
    return letters[min(max(int(zone + 0.5), 0), len(letters) - 1)]
|
Simple implementation of Zambretti forecaster algorithm.
Inspired by beteljuice.com Java algorithm, as converted to Python by
honeysucklecottage.me.uk, and further information
from http://www.meteormetrics.com/zambretti.htm
|
entailment
|
def read_block(self, address):
    """Read 32 bytes from the weather station.

    If the read fails for any reason, :obj:`None` is returned.

    :param address: address to read from.
    :type address: int
    :return: the data from the weather station.
    :rtype: list(int)
    """
    hi = address // 256
    lo = address % 256
    # the read command is sent twice in one 8-byte report
    command = [self.ReadCommand, hi, lo, self.EndMark] * 2
    if not self.dev.write_data(command):
        return None
    return self.dev.read_data(32)
|
Read 32 bytes from the weather station.
If the read fails for any reason, :obj:`None` is returned.
:param address: address to read from.
:type address: int
:return: the data from the weather station.
:rtype: list(int)
|
entailment
|
def write_byte(self, address, data):
    """Write a single byte to the weather station.

    :param address: address to write to.
    :type address: int
    :param data: the value to write.
    :type data: int
    :return: success status.
    :rtype: bool
    """
    command = [
        self.WriteCommandWord, address // 256, address % 256, self.EndMark,
        self.WriteCommandWord, data, 0, self.EndMark,
    ]
    if not self.dev.write_data(command):
        return False
    ack = self.dev.read_data(8)
    if ack is None:
        return False
    # the station acknowledges by returning 0xA5 in every byte
    return all(byte == 0xA5 for byte in ack)
|
Write a single byte to the weather station.
:param address: address to write to.
:type address: int
:param data: the value to write.
:type data: int
:return: success status.
:rtype: bool
|
entailment
|
def inc_ptr(self, ptr):
    """Get the next circular buffer data pointer, wrapping back to
    data_start past the 64 KiB address space."""
    nxt = ptr + self.reading_len[self.ws_type]
    return self.data_start if nxt >= 0x10000 else nxt
|
Get next circular buffer data pointer.
|
entailment
|
def dec_ptr(self, ptr):
    """Get the previous circular buffer data pointer, wrapping to the
    last reading slot below 64 KiB when moving before data_start."""
    step = self.reading_len[self.ws_type]
    prev = ptr - step
    return prev if prev >= self.data_start else 0x10000 - step
|
Get previous circular buffer data pointer.
|
entailment
|
def get_raw_data(self, ptr, unbuffered=False):
    """Get raw data from circular buffer.

    If unbuffered is false then a cached value that was obtained
    earlier may be returned.

    A reading may straddle a 0x20-byte block boundary, so up to two
    blocks are consulted; a one-block cache avoids re-reading.

    :param ptr: station memory address of the reading.
    :param unbuffered: if True, discard the cached block first.
    :return: list of raw bytes for one reading.
    """
    if unbuffered:
        self._data_pos = None
    # round down ptr to a 'block boundary'
    idx = ptr - (ptr % 0x20)
    ptr -= idx
    count = self.reading_len[self.ws_type]
    if self._data_pos == idx:
        # cache contains useful data
        result = self._data_block[ptr:ptr + count]
        if len(result) >= count:
            return result
    else:
        result = list()
    if ptr + count > 0x20:
        # need part of next block, which may be in cache
        if self._data_pos != idx + 0x20:
            self._data_pos = idx + 0x20
            self._data_block = self._read_block(self._data_pos)
        result += self._data_block[0:ptr + count - 0x20]
        if len(result) >= count:
            return result
    # read current block
    self._data_pos = idx
    self._data_block = self._read_block(self._data_pos)
    # prepend this block's part to whatever came from the next block
    result = self._data_block[ptr:ptr + count] + result
    return result
|
Get raw data from circular buffer.
If unbuffered is false then a cached value that was obtained
earlier may be returned.
|
entailment
|
def get_data(self, ptr, unbuffered=False):
    """Get decoded data from the circular buffer.

    If unbuffered is false then a cached value that was obtained
    earlier may be returned."""
    raw = self.get_raw_data(ptr, unbuffered)
    return _decode(raw, self._reading_format[self.ws_type])
|
Get decoded data from circular buffer.
If unbuffered is false then a cached value that was obtained
earlier may be returned.
|
entailment
|
def current_pos(self):
    """Get circular buffer location where current data is being written.

    Reads the pointer from the station's fixed block; logs an error if
    the pointer jumps by anything other than one reading length.
    """
    new_ptr = _decode(
        self._read_fixed_block(0x0020), self.lo_fix_format['current_pos'])
    if new_ptr == self._current_ptr:
        return self._current_ptr
    if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr):
        # pointer moved by more than one reading since the last call
        logger.error(
            'unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr)
    self._current_ptr = new_ptr
    return self._current_ptr
|
Get circular buffer location where current data is being written.
|
entailment
|
def get_raw_fixed_block(self, unbuffered=False):
    """Get the raw "fixed block" of settings and min/max data.

    A cached copy is returned unless ``unbuffered`` is True or no copy
    has been read yet."""
    cached = self._fixed_block
    if cached and not unbuffered:
        return cached
    self._fixed_block = self._read_fixed_block()
    return self._fixed_block
|
Get the raw "fixed block" of settings and min/max data.
|
entailment
|
def get_fixed_block(self, keys=(), unbuffered=False):
    """Get the decoded "fixed block" of settings and min/max data.

    A subset of the entire block can be selected by keys.

    :param keys: path of keys used to navigate into the nested
        fixed_format structure; empty for the whole block.  (An empty
        tuple replaces the original mutable ``[]`` default.)
    :param unbuffered: if True, force a fresh read instead of using
        the cached block.
    """
    if unbuffered or not self._fixed_block:
        self._fixed_block = self._read_fixed_block()
    # 'fmt' avoids shadowing the builtin 'format'
    fmt = self.fixed_format
    # navigate down the list of keys to reach the wanted sub-format
    for key in keys:
        fmt = fmt[key]
    return _decode(self._fixed_block, fmt)
|
Get the decoded "fixed block" of settings and min/max data.
A subset of the entire block can be selected by keys.
|
entailment
|
def write_data(self, data):
    """Write a set of single bytes to the weather station. Data must be an
    array of (ptr, value) pairs.

    After writing, the station's 'data changed' flag is set and this
    method blocks (polling every 6 seconds) until the station clears it.
    """
    # send data
    for ptr, value in data:
        self._write_byte(ptr, value)
    # set 'data changed'
    self._write_byte(self.fixed_format['data_changed'][0], 0xAA)
    # wait for station to clear 'data changed'
    while True:
        ack = _decode(
            self._read_fixed_block(0x0020), self.fixed_format['data_changed'])
        if ack == 0:
            break
        logger.debug('write_data waiting for ack')
        time.sleep(6)
|
Write a set of single bytes to the weather station. Data must be an
array of (ptr, value) pairs.
|
entailment
|
def _find_device(self, idVendor, idProduct):
    """Find a USB device by vendor and product id.

    Returns the first matching device, or None if there is no match.
    """
    wanted = (idVendor, idProduct)
    for bus in usb.busses():
        for device in bus.devices:
            if (device.idVendor, device.idProduct) == wanted:
                return device
    return None
|
Find a USB device by product and vendor id.
|
entailment
|
def read_data(self, size):
    """Receive data from the device.

    If the read fails for any reason, an :obj:`IOError` exception
    is raised.

    :param size: the number of bytes to read.
    :type size: int
    :return: the data received.
    :rtype: list(int)
    """
    # endpoint 0x81, with a 1200 ms timeout
    buf = self.devh.interruptRead(0x81, size, 1200)
    if buf is None or len(buf) < size:
        raise IOError('pywws.device_libusb.USBDevice.read_data failed')
    return list(buf)
|
Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
|
entailment
|
def write_data(self, buf):
    """Send data to the device.

    If the write fails for any reason, an :obj:`IOError` exception
    is raised.

    :param buf: the data to send.
    :type buf: list(int)
    :return: success status.
    :rtype: bool
    """
    request_type = usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE
    sent = self.devh.controlMsg(
        request_type, usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
    if sent != len(buf):
        raise IOError('pywws.device_libusb.USBDevice.write_data failed')
    return True
|
Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
|
entailment
|
def _adapt_WSDateTime(dt):
"""Return unix timestamp of the datetime like input.
If conversion overflows high, return sint64_max ,
if underflows, return 0
"""
try:
ts = int(
(dt.replace(tzinfo=pytz.utc)
- datetime(1970,1,1,tzinfo=pytz.utc)
).total_seconds()
)
except (OverflowError,OSError):
if dt < datetime.now():
ts = 0
else:
ts = 2**63-1
return ts
|
Return unix timestamp of the datetime like input.
If conversion overflows high, return sint64_max ,
if underflows, return 0
|
entailment
|
def _predicate(self, i):
"""Given a valid datetime or slace, return the predicate portion
of the SQL query, a boolean indicating whether multiple items are
expected from the result, and a dictionary of parameters for the query
"""
if isinstance(i, slice):
if i.step is not None:
raise TypeError("Slice step not permitted")
if ( (i.start is not None and not isinstance(i.start, datetime))
or (i.stop is not None and not isinstance(i.stop, datetime))
):
raise TypeError(
"Slice indices must be {} or None".format(datetime)
)
if i.start is not None and i.stop is not None:
if i.start > i.stop:
raise ValueError(
"Start index is greater than the End index"
)
else:
# Substitution of the key coloumn, but the
# parameters themselves will be substituted by sqlite3
predicate = "WHERE {} BETWEEN :start AND :stop".format(
self._keycol
)
elif i.start is not None:
# i.stop will also be None
predicate = "WHERE {} >= :start".format(self._keycol)
elif i.stop is not None:
# i.start will also be None
predicate = "WHERE {} <= :stop".format(self._keycol)
else:
# both are None, so equivelent to wanting everything
predicate = ""
multi = True
pred = {"start": i.start, "stop": i.stop}
elif isinstance(i, datetime):
# Substitution of the key coloumn, but the
# parameters themselves will be substituted by sqlite3
predicate = "WHERE {} = :key".format(self._keycol)
multi = False
pred = {"key": i}
else:
# not a slice or a datetime object
raise TypeError("List indices must be {}".format(datetime))
# predicate is the end of the query string.
# multi is a boolean indicating whether the result should be iterable
# or not. pred is a dict of the parameters for substitution
return (predicate, multi, pred)
|
Given a valid datetime or slice, return the predicate portion
of the SQL query, a boolean indicating whether multiple items are
expected from the result, and a dictionary of parameters for the query
|
entailment
|
def update(self, i):
    """D.update(E) -> None.  Update D from iterable E, with pre-existing
    items being overwritten.

    Elements of E are assumed to be dicts containing the primary key,
    making this equivalent to:  for k in E: D[k.primary_key] = k
    """
    cols = self.key_list
    blank = dict.fromkeys(cols)

    def _rows(source):
        # fill in any missing columns with None so every row binds
        for item in source:
            row = dict(blank)
            row.update(item)
            yield row

    sql = "INSERT OR REPLACE INTO {} ({}) VALUES ({});".format(
        self.table,
        ", ".join(cols),
        ", ".join(":" + col for col in cols))
    with self._connection as con:
        con.executemany(sql, _rows(i))
|
D.update(E) -> None. Update D from iterable E with pre-existing
items being overwritten.
Elements in E are assumed to be dicts containing the primary key to
allow the equivelent of:
for k in E: D[k.primary_key] = k
|
entailment
|
def before(self, i):
    """Return the datetime of the newest existing data record whose
    datetime is < i.  If no such record exists, return None.
    """
    if not isinstance(i, datetime):
        raise TypeError("'{}' is not a datetime object".format(i))
    # only the column names are substituted here; the key value is
    # bound by sqlite3
    sql = ("SELECT {selkeycol} FROM {table} WHERE "
           "{keycol} < :key ORDER BY {keycol} DESC LIMIT 1;").format(
               selkeycol=self.selkeycol,
               table=self.table,
               keycol=self._keycol)
    row = self._connection.execute(sql, {"key": i}).fetchone()
    if row is None:
        return None
    return row[self._keycol]
|
Return datetime of newest existing data record whose datetime
is < idx. If no such record exists, return None.
|
entailment
|
def keys(self):
    """D.keys() -> a set-like object providing a view on D's keys"""
    sql = "SELECT DISTINCT {} FROM {} ORDER BY {} ASC;".format(
        self.selkeycol,
        self.table,
        self._keycol)
    return {row[self._keycol] for row in self._connection.execute(sql)}
|
D.keys() -> a set-like object providing a view on D's keys
|
entailment
|
def items(self):
    """D.items() -> a set-like object providing a view on D's items"""
    key = self._keycol
    # yield (key, record-as-dict) pairs in iteration order
    for row in iter(self):
        yield (row[key], dict(row))
|
D.items() -> a set-like object providing a view on D's items
|
entailment
|
def clear(self):
    """S.clear() -> None -- remove all items from S"""
    sql = "DELETE FROM {};".format(self.table)
    with self._connection as con:
        con.execute(sql)
|
S.clear() -> None -- remove all items from S
|
entailment
|
def popitem(self):
    """D.popitem() -> (k, v)

    Remove and return some (key, value) pair as a 2-tuple; raise
    KeyError if D is empty.
    """
    try:
        value = next(iter(self))
    except StopIteration:
        # empty container
        raise KeyError
    key = value[self._keycol]
    del self[key]
    return key, value
|
D.popitem() -> (k, v)
Remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
|
entailment
|
def set_locale(lang):
    """Set the 'locale' used by a program.

    This affects the entire application, changing the way dates,
    currencies and numbers are represented.  It should not be called
    from a library routine that may be used in another program.

    The ``lang`` parameter can be any string that is recognised by
    ``locale.setlocale()``, for example ``en``, ``en_GB`` or
    ``en_GB.UTF-8``.

    :param lang: language code.
    :type lang: string
    :return: success status.
    :rtype: bool
    """
    # use the system default locale's encoding when none is given
    lc, encoding = locale.getdefaultlocale()
    target = lang if '.' in lang else (lang, encoding)
    try:
        locale.setlocale(locale.LC_ALL, target)
    except locale.Error:
        return False
    return True
|
Set the 'locale' used by a program.
This affects the entire application, changing the way dates,
currencies and numbers are represented. It should not be called
from a library routine that may be used in another program.
The ``lang`` parameter can be any string that is recognised by
``locale.setlocale()``, for example ``en``, ``en_GB`` or ``en_GB.UTF-8``.
:param lang: language code.
:type lang: string
:return: success status.
:rtype: bool
|
entailment
|
def set_translation(lang):
    """Set the translation used by (some) pywws modules.

    This sets the translation object ``pywws.localisation.translation``
    to use a particular language.

    The ``lang`` parameter can be any string of the form ``en``,
    ``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is
    ignored. In the case of a string such as ``en_GB``, the routine
    will search for an ``en_GB`` language file before searching for an
    ``en`` one.

    :param lang: language code.
    :type lang: string
    :return: success status.
    :rtype: bool
    """
    global translation
    # make list of possible languages, in order of preference
    langs = list()
    if lang:
        if '.' in lang:
            # discard any encoding suffix such as '.UTF-8'
            lang = lang.split('.')[0]
        langs += [lang, lang[:2]]
    # get translation object
    path = pkg_resources.resource_filename('pywws', 'lang')
    codeset = locale.getpreferredencoding()
    if codeset == 'ASCII':
        codeset = 'UTF-8'
    try:
        translation = gettext.translation(
            'pywws', path, languages=langs, codeset=codeset)
        # Python 3 translations don't have a ugettext method
        if not hasattr(translation, 'ugettext'):
            translation.ugettext = translation.gettext
    except IOError:
        return False
    return True
|
Set the translation used by (some) pywws modules.
This sets the translation object ``pywws.localisation.translation``
to use a particular language.
The ``lang`` parameter can be any string of the form ``en``,
``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is
ignored. In the case of a string such as ``en_GB``, the routine
will search for an ``en_GB`` language file before searching for an
``en`` one.
:param lang: language code.
:type lang: string
:return: success status.
:rtype: bool
|
entailment
|
def set_application_language(params):
    """Set the locale and translation for a pywws program.

    This function reads the language from the configuration file, then
    calls :func:`set_locale` and :func:`set_translation`.

    :param params: a :class:`pywws.storage.params` object.
    :type params: object
    """
    lang = params.get('config', 'language', None)
    if not lang:
        # no language configured; leave locale and translation alone
        return
    set_locale(lang)
    set_translation(lang)
|
Set the locale and translation for a pywws program.
This function reads the language from the configuration file, then
calls :func:`set_locale` and :func:`set_translation`.
:param params: a :class:`pywws.storage.params` object.
:type params: object
|
entailment
|
def read_data(self, size):
    """Receive data from the device.

    If the read fails for any reason, an :obj:`IOError` exception
    is raised.

    :param size: the number of bytes to read.
    :type size: int
    :return: the data received.
    :rtype: list(int)
    """
    result = []
    remaining = size
    while remaining > 0:
        # the device transfers at most 8 bytes per read
        want = min(remaining, 8)
        chunk = self.hid.read(want)
        if len(chunk) < want:
            raise IOError(
                'pywws.device_cython_hidapi.USBDevice.read_data failed')
        result += chunk
        remaining -= want
    return result
|
Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
|
entailment
|
def write_data(self, buf):
    """Send data to the device.

    Raises :obj:`IOError` if fewer bytes are written than supplied.

    :param buf: the data to send.
    :type buf: list(int)
    :return: success status.
    :rtype: bool
    """
    written = self.hid.write(buf)
    if written != len(buf):
        raise IOError(
            'pywws.device_cython_hidapi.USBDevice.write_data failed')
    return True
|
Send data to the device.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.