| sentence1 | sentence2 | label |
|---|---|---|
def run(self):
'''
Change to a temp directory
Run bash script containing commands
Place results in specified output file
Clean up temp directory
'''
qry = os.path.abspath(self.qry)
ref = os.path.abspath(self.ref)
outfile = os.path.abspath(self.outfile)
tmpdir = tempfile.mkdtemp(prefix='tmp.run_nucmer.', dir=os.getcwd())
original_dir = os.getcwd()
os.chdir(tmpdir)
script = 'run_nucmer.sh'
self._write_script(script, ref, qry, outfile)
syscall.run('bash ' + script, verbose=self.verbose)
os.chdir(original_dir)
shutil.rmtree(tmpdir)
|
Change to a temp directory
Run bash script containing commands
Place results in specified output file
Clean up temp directory
|
entailment
|
def update_indel(self, nucmer_snp):
'''Indels are reported over multiple lines, 1 base insertion or deletion per line. This method extends the current variant by 1 base if it's an indel and adjacent to the new SNP and returns True. If the current variant is a SNP, does nothing and returns False'''
new_variant = Variant(nucmer_snp)
if self.var_type not in [INS, DEL] \
or self.var_type != new_variant.var_type \
or self.qry_name != new_variant.qry_name \
or self.ref_name != new_variant.ref_name \
or self.reverse != new_variant.reverse:
return False
if self.var_type == INS \
and self.ref_start == new_variant.ref_start \
and self.qry_end + 1 == new_variant.qry_start:
self.qry_base += new_variant.qry_base
self.qry_end += 1
return True
if self.var_type == DEL \
and self.qry_start == new_variant.qry_start \
and self.ref_end + 1 == new_variant.ref_start:
self.ref_base += new_variant.ref_base
self.ref_end += 1
return True
return False
|
Indels are reported over multiple lines, 1 base insertion or deletion per line. This method extends the current variant by 1 base if it's an indel and adjacent to the new SNP and returns True. If the current variant is a SNP, does nothing and returns False
|
entailment
|
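A minimal, self-contained sketch (not the project's Variant class; the tuple layout and names here are made up) of the same adjacency idea used by update_indel: single-base insertion records that share a reference anchor and have consecutive query positions are merged into one longer insertion.

# Hypothetical illustration of merging 1-base-per-line insertion records.
def merge_insertions(records):
    """records: list of (ref_start, qry_start, qry_base) tuples, one base each."""
    merged = []
    for ref_start, qry_start, base in records:
        if (merged
                and merged[-1][0] == ref_start          # same reference anchor
                and merged[-1][2] + 1 == qry_start):    # adjacent query position
            prev = merged[-1]
            merged[-1] = (prev[0], prev[1], qry_start, prev[3] + base)
        else:
            merged.append((ref_start, qry_start, qry_start, base))
    return merged

# Three adjacent single-base insertions collapse into one 3-base insertion.
print(merge_insertions([(100, 7, 'A'), (100, 8, 'C'), (100, 9, 'G')]))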
def reader(fname):
'''Helper function to open the results file (coords file) and create alignment objects with the values in it'''
f = pyfastaq.utils.open_file_read(fname)
for line in f:
if line.startswith('[') or (not '\t' in line):
continue
yield alignment.Alignment(line)
pyfastaq.utils.close(f)
|
Helper function to open the results file (coords file) and create alignment objects with the values in it
|
entailment
|
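The skip-headers-and-yield pattern in reader() can be shown on an in-memory coords-like stream; this sketch just splits the tab-separated fields instead of building alignment.Alignment objects, and the sample content is invented for illustration.

import io

# Hypothetical coords-style content: header lines lack tabs or start with '['.
text = io.StringIO(
    "/path/ref.fa /path/qry.fa\n"
    "NUCMER\n"
    "[S1]\t[E1]\t[S2]\t[E2]\n"
    "1\t100\t5\t104\tref1\tqry1\n"
    "200\t300\t1\t101\tref1\tqry2\n"
)

def rows(f):
    for line in f:
        if line.startswith('[') or '\t' not in line:
            continue                 # skip header rows, keep data rows
        yield line.rstrip('\n').split('\t')

for fields in rows(text):
    print(fields)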
def convert_to_msp_crunch(infile, outfile, ref_fai=None, qry_fai=None):
'''Converts a coords file to a file in MSPcrunch format (for use with ACT, most likely).
ACT ignores sequence names in the crunch file, and just looks at the numbers.
To make a compatible file, the coords all must be shifted appropriately, which
can be done by providing both the ref_fai and qry_fai options.
Both or neither of these must be used, otherwise an error will be thrown.'''
fai_files = {ref_fai, qry_fai}
if None in fai_files and len(fai_files) != 1:
print(fai_files)
raise Error('Error in convert_to_msp_crunch. Must use both of ref_fai and qry_fai, or neither of them')
if ref_fai is not None:
assert qry_fai is not None
ref_offsets = pyfastaq.tasks.length_offsets_from_fai(ref_fai)
qry_offsets = pyfastaq.tasks.length_offsets_from_fai(qry_fai)
file_reader = reader(infile)
f_out = pyfastaq.utils.open_file_write(outfile)
for aln in file_reader:
if ref_fai is not None:
aln.ref_start += ref_offsets[aln.ref_name]
aln.ref_end += ref_offsets[aln.ref_name]
aln.qry_start += qry_offsets[aln.qry_name]
aln.qry_end += qry_offsets[aln.qry_name]
print(aln.to_msp_crunch(), file=f_out)
pyfastaq.utils.close(f_out)
|
Converts a coords file to a file in MSPcrunch format (for use with ACT, most likely).
ACT ignores sequence names in the crunch file, and just looks at the numbers.
To make a compatible file, the coords all must be shifted appropriately, which
can be done by providing both the ref_fai and qry_fai options.
Both or neither of these must be used, otherwise an error will be thrown.
|
entailment
|
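The coordinate shifting in convert_to_msp_crunch relies on cumulative sequence-start offsets. A small sketch of how such offsets could be derived from (name, length) pairs, analogous to what length_offsets_from_fai presumably returns (assumed here, not taken from pyfastaq):

# Hypothetical: build cumulative start offsets from (name, length) pairs,
# then shift per-sequence coordinates into one concatenated coordinate space.
def length_offsets(pairs):
    offsets, total = {}, 0
    for name, length in pairs:
        offsets[name] = total
        total += length
    return offsets

ref_offsets = length_offsets([('chr1', 1000), ('chr2', 500)])
# A hit at chr2:10-50 maps to 1010-1050 in the concatenated reference.
start, end = 10, 50
print(start + ref_offsets['chr2'], end + ref_offsets['chr2'])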
def _request(self, method, url, params=None, headers=None, data=None):
"""Common handler for all the HTTP requests."""
if not params:
params = {}
# set default headers
if not headers:
headers = {
'accept': '*/*'
}
if method == 'POST' or method == 'PUT':
headers.update({'Content-Type': 'application/json'})
try:
response = requests.request(method=method, url=self.host + self.key + url, params=params,
headers=headers, data=data)
try:
response.raise_for_status()
response_code = response.status_code
success = True if response_code // 100 == 2 else False
if response.text:
try:
data = response.json()
except ValueError:
data = response.content
else:
data = ''
response_headers = response.headers
return BurpResponse(success=success, response_code=response_code, data=data,
response_headers=response_headers)
except ValueError as e:
return BurpResponse(success=False, message="JSON response could not be decoded {}.".format(e))
except requests.exceptions.HTTPError as e:
if response.status_code == 400:
return BurpResponse(success=False, response_code=400, message='Bad Request')
else:
return BurpResponse(
message='There was an error while handling the request. {}'.format(response.content),
success=False)
except Exception as e:
return BurpResponse(success=False, message='Error is %s' % e)
|
Common handler for all the HTTP requests.
|
entailment
|
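The success flag in _request is just a 2xx bucket test on the status code; a tiny sketch of that check in isolation:

# The 2xx test used above: integer division by 100 buckets status codes.
for code in (200, 201, 204, 301, 400, 404, 500):
    success = code // 100 == 2
    print(code, success)   # 200/201/204 -> True, the rest -> False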
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Sphinx role for linking to a user profile. Defaults to linking to
GitHub profiles, but the profile URIs can be configured via the
``issues_user_uri`` config value.
Examples: ::
:user:`sloria`
Anchor text also works: ::
:user:`Steven Loria <sloria>`
"""
options = options or {}
content = content or []
has_explicit_title, title, target = split_explicit_title(text)
target = utils.unescape(target).strip()
title = utils.unescape(title).strip()
config = inliner.document.settings.env.app.config
if config.issues_user_uri:
ref = config.issues_user_uri.format(user=target)
else:
ref = "https://github.com/{0}".format(target)
if has_explicit_title:
text = title
else:
text = "@{0}".format(target)
link = nodes.reference(text=text, refuri=ref, **options)
return [link], []
|
Sphinx role for linking to a user profile. Defaults to linking to
GitHub profiles, but the profile URIs can be configured via the
``issues_user_uri`` config value.
Examples: ::
:user:`sloria`
Anchor text also works: ::
:user:`Steven Loria <sloria>`
|
entailment
|
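A role like user_role only takes effect once it is registered with Sphinx. A minimal sketch of an extension setup hook, assuming the module defines user_role as above, is listed in the project's extensions, and declares the issues_user_uri config value itself:

# Hypothetical Sphinx extension setup registering the role and its config value.
def setup(app):
    app.add_config_value('issues_user_uri', None, 'html')
    app.add_role('user', user_role)
    return {'parallel_read_safe': True}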
def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Sphinx role for linking to a CVE on https://cve.mitre.org.
Examples: ::
:cve:`CVE-2018-17175`
"""
options = options or {}
content = content or []
has_explicit_title, title, target = split_explicit_title(text)
target = utils.unescape(target).strip()
title = utils.unescape(title).strip()
ref = "https://cve.mitre.org/cgi-bin/cvename.cgi?name={0}".format(target)
text = title if has_explicit_title else target
link = nodes.reference(text=text, refuri=ref, **options)
return [link], []
|
Sphinx role for linking to a CVE on https://cve.mitre.org.
Examples: ::
:cve:`CVE-2018-17175`
|
entailment
|
def list_line(self, line):
"""
Write the given iterable of values (line) to the file as items on the
same line. Any argument that stringifies to a string legal as a TSV data
item can be written.
Does not copy the line or build a big string in memory.
"""
if len(line) == 0:
return
self.stream.write(str(line[0]))
for item in line[1:]:
self.stream.write("\t")
self.stream.write(str(item))
self.stream.write("\n")
|
Write the given iterable of values (line) to the file as items on the
same line. Any argument that stringifies to a string legal as a TSV data
item can be written.
Does not copy the line or build a big string in memory.
|
entailment
|
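A self-contained sketch of the same streaming TSV write against an in-memory buffer, to show the tab-joining behaviour without building one big string per line (the TinyTSV class is a stand-in invented for this example):

import io

class TinyTSV:
    def __init__(self, stream):
        self.stream = stream
    def list_line(self, line):
        if len(line) == 0:
            return
        self.stream.write(str(line[0]))
        for item in line[1:]:
            self.stream.write("\t")
            self.stream.write(str(item))
        self.stream.write("\n")

buf = io.StringIO()
w = TinyTSV(buf)
w.list_line(["gene", 42, 3.5])
print(repr(buf.getvalue()))   # 'gene\t42\t3.5\n'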
def prepare(doc):
""" Parse metadata to obtain list of mustache templates,
then load those templates.
"""
doc.mustache_files = doc.get_metadata('mustache')
if isinstance(doc.mustache_files, basestring): # process single YAML value stored as string
if not doc.mustache_files:
doc.mustache_files = None # switch empty string back to None
else:
doc.mustache_files = [ doc.mustache_files ] # put non-empty string in list
# with open('debug.txt', 'a') as the_file:
# the_file.write(str(doc.mustache_files))
# the_file.write('\n')
if doc.mustache_files is not None:
doc.mustache_hashes = [yaml.load(open(file, 'r').read()) for file in doc.mustache_files]
doc.mhash = { k: v for mdict in doc.mustache_hashes for k, v in mdict.items() } # combine list of dicts into a single dict
doc.mrenderer = pystache.Renderer(escape=lambda u: u, missing_tags='strict')
else:
doc.mhash = None
|
Parse metadata to obtain list of mustache templates,
then load those templates.
|
entailment
|
def action(elem, doc):
""" Apply combined mustache template to all strings in document.
"""
if type(elem) == Str and doc.mhash is not None:
elem.text = doc.mrenderer.render(elem.text, doc.mhash)
return elem
|
Apply combined mustache template to all strings in document.
|
entailment
|
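The per-Str rendering in action() boils down to pystache substitution with the combined hash built in prepare(). A small standalone sketch of that call, assuming the pystache package is installed and using an invented template and hash:

import pystache

# Same renderer settings as in prepare(): no HTML escaping, strict missing tags.
renderer = pystache.Renderer(escape=lambda u: u, missing_tags='strict')
mhash = {'project': 'panflute-mustache', 'version': '1.0'}
print(renderer.render('{{project}} v{{version}}', mhash))
# -> panflute-mustache v1.0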
def get_callback(self, renderer_context):
"""
Determine the name of the callback to wrap around the json output.
"""
request = renderer_context.get('request', None)
params = request and get_query_params(request) or {}
return params.get(self.callback_parameter, self.default_callback)
|
Determine the name of the callback to wrap around the json output.
|
entailment
|
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders into jsonp, wrapping the json output in a callback function.
Clients may set the callback function name using a query parameter
on the URL, for example: ?callback=exampleCallbackName
"""
renderer_context = renderer_context or {}
callback = self.get_callback(renderer_context)
json = super(JSONPRenderer, self).render(data, accepted_media_type,
renderer_context)
return callback.encode(self.charset) + b'(' + json + b');'
|
Renders into jsonp, wrapping the json output in a callback function.
Clients may set the callback function name using a query parameter
on the URL, for example: ?callback=exampleCallbackName
|
entailment
|
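The JSONP framing itself is just bytes concatenation around the JSON payload; a sketch of the wrapping step in isolation:

import json

# Wrap a JSON payload in a callback, as the renderer does with its superclass output.
payload = json.dumps({'ok': True}).encode('utf-8')
callback = 'exampleCallbackName'
jsonp = callback.encode('utf-8') + b'(' + payload + b');'
print(jsonp)   # b'exampleCallbackName({"ok": true});'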
def get(self, measurementId):
"""
Analyses the measurement with the given parameters
:param measurementId:
:return:
"""
logger.info('Loading raw data for ' + measurementId)
measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
if measurement is not None:
if measurement.inflate():
data = {
name: {
'raw': {
'x': self._jsonify(data.raw('x')),
'y': self._jsonify(data.raw('y')),
'z': self._jsonify(data.raw('z'))
},
'vibration': {
'x': self._jsonify(data.vibration('x')),
'y': self._jsonify(data.vibration('y')),
'z': self._jsonify(data.vibration('z'))
},
'tilt': {
'x': self._jsonify(data.tilt('x')),
'y': self._jsonify(data.tilt('y')),
'z': self._jsonify(data.tilt('z'))
}
}
for name, data in measurement.data.items()
}
return data, 200
else:
return None, 404
else:
return None, 404
|
Analyses the measurement with the given parameters
:param measurementId:
:return:
|
entailment
|
def to_jd(year, week, day):
'''Return Julian day count of given ISO year, week, and day'''
return day + n_weeks(SUN, gregorian.to_jd(year - 1, 12, 28), week)
|
Return Julian day count of given ISO year, week, and day
|
entailment
|
def from_jd(jd):
'''Return tuple of ISO (year, week, day) for Julian day'''
year = gregorian.from_jd(jd)[0]
day = jwday(jd) + 1
dayofyear = ordinal.from_jd(jd)[1]
week = trunc((dayofyear - day + 10) / 7)
# Reset year
if week < 1:
week = weeks_per_year(year - 1)
year = year - 1
# Check that year actually has 53 weeks
elif week == 53 and weeks_per_year(year) != 53:
week = 1
year = year + 1
return year, week, day
|
Return tuple of ISO (year, week, day) for Julian day
|
entailment
|
def weeks_per_year(year):
'''Number of ISO weeks in a year'''
# 53 weeks: any year starting on Thursday and any leap year starting on Wednesday
jan1 = jwday(gregorian.to_jd(year, 1, 1))
if jan1 == THU or (jan1 == WED and isleap(year)):
return 53
else:
return 52
|
Number of ISO weeks in a year
|
entailment
|
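The week formula in from_jd (week = trunc((dayofyear - day + 10) / 7)) and the 53-week rule in weeks_per_year can be cross-checked against Python's datetime.isocalendar(). This sketch swaps the Julian-day plumbing (gregorian.to_jd, jwday) for datetime, but keeps the same arithmetic and year-wrap handling:

import datetime
from calendar import isleap
from math import trunc

def weeks_per_year(year):
    # 53 weeks when Jan 1 is a Thursday, or a Wednesday in a leap year
    # (isoweekday convention: Mon=1 .. Sun=7, so Wed=3, Thu=4).
    jan1 = datetime.date(year, 1, 1).isoweekday()
    return 53 if jan1 == 4 or (jan1 == 3 and isleap(year)) else 52

def iso_year_week(d):
    day = d.isoweekday()
    dayofyear = d.timetuple().tm_yday
    week, year = trunc((dayofyear - day + 10) / 7), d.year
    if week < 1:                                     # belongs to previous ISO year
        year, week = year - 1, weeks_per_year(year - 1)
    elif week == 53 and weeks_per_year(year) != 53:  # belongs to next ISO year
        year, week = year + 1, 1
    return year, week

for d in (datetime.date(2016, 1, 1), datetime.date(2015, 12, 31),
          datetime.date(2021, 1, 4)):
    print(d, iso_year_week(d), d.isocalendar()[:2])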
def stsci(hdulist):
"""For STScI GEIS files, need to do extra steps."""
instrument = hdulist[0].header.get('INSTRUME', '')
# Update extension header keywords
if instrument in ("WFPC2", "FOC"):
rootname = hdulist[0].header.get('ROOTNAME', '')
filetype = hdulist[0].header.get('FILETYPE', '')
for i in range(1, len(hdulist)):
# Add name and extver attributes to match PyFITS data structure
hdulist[i].name = filetype
hdulist[i]._extver = i
# Add extension keywords for this chip to extension
hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier")
hdulist[i].header['EXTVER'] = (i, "extension version number")
hdulist[i].header['EXTNAME'] = (filetype, "extension name")
hdulist[i].header['INHERIT'] = (True, "inherit the primary header")
hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set")
|
For STScI GEIS files, need to do extra steps.
|
entailment
|
def stsci2(hdulist, filename):
"""For STScI GEIS files, need to do extra steps."""
# Write output file name to the primary header
instrument = hdulist[0].header.get('INSTRUME', '')
if instrument in ("WFPC2", "FOC"):
hdulist[0].header['FILENAME'] = filename
|
For STScI GEIS files, need to do extra steps.
|
entailment
|
def readgeis(input):
"""Input GEIS files "input" will be read and a HDUList object will
be returned.
The user can use the writeto method to write the HDUList object to
a FITS file.
"""
global dat
cardLen = fits.Card.length
# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
raise "Illegal input GEIS file name %s" % input
data_file = input[:-1]+'d'
_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
bytes_per_line = cardLen+1
else:
raise "Platform %s is not supported (yet)." % _os
geis_fmt = {'REAL':'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'}
end_card = 'END'+' '* (cardLen-3)
# open input file
im = open(input)
# Generate the primary HDU
cards = []
while 1:
line = im.read(bytes_per_line)[:cardLen]
line = line[:8].upper() + line[8:]
if line == end_card:
break
cards.append(fits.Card.fromstring(line))
phdr = fits.Header(cards)
im.close()
_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
_bitpix = -_bitpix
if _naxis0 > 0:
size = reduce(lambda x,y:x*y, _naxis[1:])
data_size = abs(_bitpix) * size // 8
else:
data_size = 0
group_size = data_size + _psize // 8
# decode the group parameter definitions,
# group parameters will become extension header
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']
formats = []
bools = []
floats = []
_range = range(1, pcount+1)
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
['PDTYPE'+str(j) for j in _range] + \
['PSIZE'+str(j) for j in _range] + \
['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
ptype = key[i-1]
pdtype = phdr['PDTYPE'+str(i)]
star = pdtype.find('*')
_type = pdtype[:star]
_bytes = pdtype[star+1:]
# collect boolean keywords since they need special attention later
if _type == 'LOGICAL':
bools.append(i)
if pdtype == 'REAL*4':
floats.append(i)
fmt = geis_fmt[_type] + _bytes
formats.append((ptype,fmt))
_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)
if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
_uint16 = 1
_bzero = 32768
else:
_uint16 = 0
# delete from the end, so it will not conflict with previous delete
for i in range(len(phdr)-1, -1, -1):
if phdr.cards[i].keyword in _list:
del phdr[i]
# clean up other primary header keywords
phdr['SIMPLE'] = True
phdr['BITPIX'] = 16
phdr['GROUPS'] = False
_after = 'NAXIS'
if _naxis0 > 0:
_after += str(_naxis0)
phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)
phdr.set('NEXTEND', value=gcount, comment="Number of standard extensions")
hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=None)])
# Use copy-on-write for all data types since byteswap may be needed
# in some platforms.
f1 = open(data_file, mode='rb')
dat = f1.read()
# dat = memmap(data_file, mode='c')
hdulist.mmobject = dat
errormsg = ""
loc = 0
for k in range(gcount):
ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
ext_dat = ext_dat.reshape(_shape)
if _uint16:
ext_dat += _bzero
# Check to see whether there are any NaN's or infs which might indicate
# a byte-swapping problem, such as being written out on little-endian
# and being read in on big-endian or vice-versa.
if _code.find('float') >= 0 and \
(numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had floating point data values =\n"
errormsg += "= of NaN and/or Inf. =\n"
errormsg += "===================================\n"
elif _code.find('int') >= 0:
# Check INT data for max values
ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
if ext_dat_exp.max() == int(_bitpix) - 1:
# Potential problems with byteswapping
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had integer data values =\n"
errormsg += "= with maximum bitvalues. =\n"
errormsg += "===================================\n"
ext_hdu = fits.ImageHDU(data=ext_dat)
rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
loc += group_size
# Create separate PyFITS Card objects for each entry in 'rec'
for i in range(1, pcount+1):
#val = rec.field(i-1)[0]
val = rec[0][i-1]
if val.dtype.kind == 'S':
val = val.decode('ascii')
if i in bools:
if val:
val = True
else:
val = False
if i in floats:
# use fromstring, format in Card is deprecated in pyfits 0.9
_str = '%-8s= %20.7G / %s' % (key[i-1], val, comm[i-1])
_card = fits.Card.fromstring(_str)
else:
_card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
ext_hdu.header.append(_card)
# deal with bscale/bzero
if (_bscale != 1 or _bzero != 0):
ext_hdu.header['BSCALE'] = _bscale
ext_hdu.header['BZERO'] = _bzero
hdulist.append(ext_hdu)
if errormsg != "":
errormsg += "===================================\n"
errormsg += "= This file may have been =\n"
errormsg += "= written out on a platform =\n"
errormsg += "= with a different byte-order. =\n"
errormsg += "= =\n"
errormsg += "= Please verify that the values =\n"
errormsg += "= are correct or apply the =\n"
errormsg += "= '.byteswap()' method. =\n"
errormsg += "===================================\n"
print(errormsg)
f1.close()
stsci(hdulist)
return hdulist
|
Input GEIS files "input" will be read and a HDUList object will
be returned.
The user can use the writeto method to write the HDUList object to
a FITS file.
|
entailment
|
def parse_path(f1, f2):
"""Parse two input arguments and return two lists of file names"""
import glob
# if second argument is missing or is a wild card, point it
# to the current directory
f2 = f2.strip()
if f2 == '' or f2 == '*':
f2 = './'
# if the first argument is a directory, use all GEIS files
if os.path.isdir(f1):
f1 = os.path.join(f1, '*.??h')
list1 = glob.glob(f1)
list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.']
# if the second argument is a directory, use file names in the
# first argument to construct file names, i.e.
# abc.xyh will be converted to abc_xyf.fits
if os.path.isdir(f2):
list2 = []
for file in list1:
name = os.path.split(file)[-1]
fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits'
list2.append(os.path.join(f2, fitsname))
else:
list2 = [s.strip() for s in f2.split(",")]
if list1 == [] or list2 == []:
err_msg = ""
if list1 == []:
err_msg += "Input files `{:s}` not usable/available. ".format(f1)
if list2 == []:
err_msg += "Input files `{:s}` not usable/available. ".format(f2)
raise IOError(err_msg)
else:
return list1, list2
|
Parse two input arguments and return two lists of file names
|
entailment
|
def parseinput(inputlist,outputname=None, atfile=None):
"""
Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob
"""
# Initialize some variables
files = [] # list used to store names of input files
newoutputname = outputname # Outputname returned to calling program.
# The value of outputname is only changed
# if it had a value of 'None' on input.
# We can use irafglob to parse the input. If the input wasn't
# an association table, it needs to be either a wildcard, '@' file,
# or comma-separated list.
files = irafglob(inputlist, atfile=atfile)
# Now that we have expanded the inputlist into a python list
# containing the list of input files, it is necessary to examine
# each of the files to make sure none of them are association tables.
#
# If an association table is found, the entries should be read
# Determine if the input is an association table
for file in files:
if (checkASN(file) == True):
# Create a list to store the files extracted from the
# association table
assoclist = []
# The input is an association table
try:
# Open the association table
assocdict = readASNTable(file, None, prodonly=False)
except:
errorstr = "###################################\n"
errorstr += "# #\n"
errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
errorstr += str(file)+'\n'
errorstr += "# DURING FILE PARSING. #\n"
errorstr += "# #\n"
errorstr += "# Please determine if the file is #\n"
errorstr += "# in the current directory and #\n"
errorstr += "# that it has been properly #\n"
errorstr += "# formatted. #\n"
errorstr += "# #\n"
errorstr += "# This error message is being #\n"
errorstr += "# generated from within the #\n"
errorstr += "# parseinput.py module. #\n"
errorstr += "# #\n"
errorstr += "###################################\n"
raise ValueError(errorstr)
# Extract the output name from the association table if None
# was provided on input.
if outputname is None:
newoutputname = assocdict['output']
# Loop over the association dictionary to extract the input
# file names.
for f in assocdict['order']:
assoclist.append(fileutil.buildRootname(f))
# Remove the name of the association table from the list of files
files.remove(file)
# Append the list of filenames generated from the association table
# to the master list of input files.
files.extend(assoclist)
# Return the list of the input files and the output name if provided in an association.
return files, newoutputname
|
Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob
|
entailment
|
def checkASN(filename):
"""
Determine if the filename provided to the function belongs to
an association.
Parameters
----------
filename: string
Returns
-------
validASN : boolean value
"""
# Extract the file extn type:
extnType = filename[filename.rfind('_')+1:filename.rfind('.')]
# Determine if this extn name is valid for an assocation file
if isValidAssocExtn(extnType):
return True
else:
return False
|
Determine if the filename provided to the function belongs to
an association.
Parameters
----------
filename: string
Returns
-------
validASN : boolean value
|
entailment
|
def countinputs(inputlist):
"""
Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
"""
# Initialize return values
numInputs = 0
numASNfiles = 0
# Use irafglob to count the number of input files
files = irafglob(inputlist, atfile=None)
# Use the "len" ufunc to count the number of entries in the list
numInputs = len(files)
# Loop over the list and see if any of the entries are association files
for file in files:
if (checkASN(file) == True):
numASNfiles += 1
return numInputs,numASNfiles
|
Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
|
entailment
|
def summary(logfile, time_format):
"show a summary of all projects"
def output(summary):
width = max([len(p[0]) for p in summary]) + 3
print '\n'.join([
"%s%s%s" % (p[0], ' ' * (width - len(p[0])),
colored(minutes_to_txt(p[1]), 'red')) for p in summary])
output(server.summarize(read(logfile, time_format, only_elapsed=True)))
|
show a summary of all projects
|
entailment
|
def status(logfile, time_format):
"show current status"
try:
r = read(logfile, time_format)[-1]
if r[1][1]:
return summary(logfile, time_format)
else:
print "working on %s" % colored(r[0], attrs=['bold'])
print " since %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(now(), time_format), 'green')
print " => %s elapsed" % colored(time_elapsed(r[1][0]), 'red')
except IndexError:
return cmdapp.help()
|
show current status
|
entailment
|
def start(project, logfile, time_format):
"start tracking for <project>"
records = read(logfile, time_format)
if records and not records[-1][1][1]:
print "error: there is a project already active"
return
write(server.start(project, records), logfile, time_format)
print "starting work on %s" % colored(project, attrs=['bold'])
print " at %s" % colored(server.date_to_txt(now(), time_format), 'green')
|
start tracking for <project>
|
entailment
|
def stop(logfile, time_format):
"stop tracking for the active project"
def save_and_output(records):
records = server.stop(records)
write(records, logfile, time_format)
def output(r):
print "worked on %s" % colored(r[0], attrs=['bold'])
print " from %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(r[1][1], time_format), 'green')
print " => %s elapsed" % colored(
time_elapsed(r[1][0], r[1][1]), 'red')
output(records[-1])
save_and_output(read(logfile, time_format))
|
stop tracking for the active project
|
entailment
|
def parse(logfile, time_format):
"parses a stream with text formatted as a Timed logfile and shows a summary"
records = [server.record_from_txt(line, only_elapsed=True,
time_format=time_format) for line in sys.stdin.readlines()]
# TODO: make this code better.
def output(summary):
width = max([len(p[0]) for p in summary]) + 3
print '\n'.join([
"%s%s%s" % (p[0], ' ' * (width - len(p[0])),
colored(minutes_to_txt(p[1]), 'red')) for p in summary])
output(server.summarize(records))
|
parses a stream with text formatted as a Timed logfile and shows a summary
|
entailment
|
def projects(logfile, time_format):
"prints a newline-separated list of all projects"
print '\n'.join(server.list_projects(read(logfile, time_format)))
|
prints a newline-separated list of all projects
|
entailment
|
def getLTime():
"""Returns a formatted string with the current local time."""
_ltime = _time.localtime(_time.time())
tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime)
return tlm_str
|
Returns a formatted string with the current local time.
|
entailment
|
def getDate():
"""Returns a formatted string with the current date."""
_ltime = _time.localtime(_time.time())
date_str = _time.strftime('%Y-%m-%dT%H:%M:%S',_ltime)
return date_str
|
Returns a formatted string with the current date.
|
entailment
|
def convertDate(date):
"""Convert DATE string into a decimal year."""
d, t = date.split('T')
return decimal_date(d, timeobs=t)
|
Convert DATE string into a decimal year.
|
entailment
|
def decimal_date(dateobs, timeobs=None):
"""Convert DATE-OBS (and optional TIME-OBS) into a decimal year."""
year, month, day = dateobs.split('-')
if timeobs is not None:
hr, min, sec = timeobs.split(':')
else:
hr, min, sec = 0, 0, 0
rdate = datetime.datetime(int(year), int(month), int(day), int(hr),
int(min), int(sec))
dday = (float(rdate.strftime("%j")) + rdate.hour / 24.0 +
rdate.minute / (60. * 24) + rdate.second / (3600 * 24.)) / 365.25
ddate = int(year) + dday
return ddate
|
Convert DATE-OBS (and optional TIME-OBS) into a decimal year.
|
entailment
|
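A quick worked example of the decimal-year conversion: mid-year dates land near year + 0.5. The same arithmetic as decimal_date is restated here so the snippet runs on its own:

import datetime

def decimal_year(dateobs, timeobs=None):
    year, month, day = (int(x) for x in dateobs.split('-'))
    hr, mn, sec = (int(x) for x in timeobs.split(':')) if timeobs else (0, 0, 0)
    rdate = datetime.datetime(year, month, day, hr, mn, sec)
    dday = (float(rdate.strftime("%j")) + rdate.hour / 24.0 +
            rdate.minute / (60. * 24) + rdate.second / (3600 * 24.)) / 365.25
    return year + dday

print(decimal_year('2024-07-01'))              # ~2024.50
print(decimal_year('2024-01-01', '12:00:00'))  # just past 2024.00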
def interpretDQvalue(input):
"""
Converts an integer 'input' into its component bit values as a list of
power of 2 integers.
For example, the bit value 1027 would return [1, 2, 1024]
"""
nbits = 16
# We will only support integer values up to 2**128
for iexp in [16, 32, 64, 128]:
# Find out whether the input value is less than 2**iexp
if (input // (2 ** iexp)) == 0:
# when it finally is, we have identified how many bits can be used to
# describe this input bitvalue
nbits = iexp
break
# Find out how 'dtype' values are described on this machine
a = np.zeros(1, dtype='int16')
atype_descr = a.dtype.descr[0][1]
# Use this description to build the description we need for our input integer
dtype_str = atype_descr[:2] + str(nbits // 8)
result = np.zeros(nbits + 1, dtype=dtype_str)
# For each bit, determine whether it has been set in the input value or not
for n in range(nbits + 1):
i = 2 ** n
if input & i > 0:
# record which bit has been set as the power-of-2 integer
result[n] = i
# Return the non-zero unique values as a Python list
return np.delete(np.unique(result), 0).tolist()
|
Converts an integer 'input' into its component bit values as a list of
power of 2 integers.
For example, the bit value 1027 would return [1, 2, 1024]
|
entailment
|
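The same decomposition can be done with plain bit shifts; a compact sketch that reproduces the 1027 -> [1, 2, 1024] example from the docstring without the NumPy bookkeeping:

def bit_components(value, nbits=16):
    # Collect every set bit as its power-of-two integer, lowest first.
    return [1 << n for n in range(nbits) if value & (1 << n)]

print(bit_components(1027))   # [1, 2, 1024]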
def isFits(input):
"""
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
"""
isfits = False
fitstype = None
names = ['fits', 'fit', 'FITS', 'FIT']
#determine if input is a fits file based on extension
# Only check type of FITS file if filename ends in valid FITS string
f = None
fileclose = False
if isinstance(input, fits.HDUList):
isfits = True
f = input
else:
isfits = True in [input.endswith(l) for l in names]
# if input is a fits file determine what kind of fits it is
#waiver fits len(shape) == 3
if isfits:
if f is None:
try:
f = fits.open(input, mode='readonly')
fileclose = True
except Exception:
if f is not None:
f.close()
raise
data0 = f[0].data
if data0 is not None:
try:
if isinstance(f[1], fits.TableHDU):
fitstype = 'waiver'
except IndexError:
fitstype = 'simple'
else:
fitstype = 'mef'
if fileclose:
f.close()
return isfits, fitstype
|
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
|
entailment
|
def verifyWriteMode(files):
"""
Checks whether files are writable. It is up to the calling routine to raise
an Exception, if desired.
This function returns True, if all files are writable and False, if any are
not writable. In addition, for all files found to not be writable, it will
print out the list of names of affected files.
"""
# Start by insuring that input is a list of filenames,
# if only a single filename has been given as input,
# convert it to a list with len == 1.
if not isinstance(files, list):
files = [files]
# Keep track of the name of each file which is not writable
not_writable = []
writable = True
# Check each file in input list
for fname in files:
try:
f = open(fname,'a')
f.close()
del f
except:
not_writable.append(fname)
writable = False
if not writable:
print('The following file(s) do not have write permission!')
for fname in not_writable:
print(' ', fname)
return writable
|
Checks whether files are writable. It is up to the calling routine to raise
an Exception, if desired.
This function returns True, if all files are writable and False, if any are
not writable. In addition, for all files found to not be writable, it will
print out the list of names of affected files.
|
entailment
|
def getFilterNames(header, filternames=None):
"""
Returns a comma-separated string of filter names extracted from the input
header (PyFITS header object). This function has been hard-coded to
support the following instruments:
ACS, WFPC2, STIS
This function relies on the 'INSTRUME' keyword to define what instrument
has been used to generate the observation/header.
The 'filternames' parameter allows the user to provide a list of keyword
names for their instrument, in the case their instrument is not supported.
"""
# Define the keyword names for each instrument
_keydict = {
'ACS': ['FILTER1', 'FILTER2'],
'WFPC2': ['FILTNAM1', 'FILTNAM2'],
'STIS': ['OPT_ELEM', 'FILTER'],
'NICMOS': ['FILTER', 'FILTER2'],
'WFC3': ['FILTER', 'FILTER2']
}
# Find out what instrument the input header came from, based on the
# 'INSTRUME' keyword
if 'INSTRUME' in header:
instrument = header['INSTRUME']
else:
raise ValueError('Header does not contain INSTRUME keyword.')
# Check to make sure this instrument is supported in _keydict
if instrument in _keydict:
_filtlist = _keydict[instrument]
else:
_filtlist = filternames
# At this point, we know what keywords correspond to the filter names
# in the header. Now, get the values associated with those keywords.
# Build a list of all filter name values, with the exception of the
# blank keywords. Values containing 'CLEAR' or 'N/A' are valid.
_filter_values = []
for _key in _filtlist:
if _key in header:
_val = header[_key]
else:
_val = ''
if _val.strip() != '':
_filter_values.append(header[_key])
# Return the comma-separated list
return ','.join(_filter_values)
|
Returns a comma-separated string of filter names extracted from the input
header (PyFITS header object). This function has been hard-coded to
support the following instruments:
ACS, WFPC2, STIS
This function relies on the 'INSTRUME' keyword to define what instrument
has been used to generate the observation/header.
The 'filternames' parameter allows the user to provide a list of keyword
names for their instrument, in the case their instrument is not supported.
|
entailment
|
def buildNewRootname(filename, extn=None, extlist=None):
"""
Build rootname for a new file.
Use 'extn' for new filename if given, does NOT append a suffix/extension at
all.
Does NOT check to see if it exists already. Will ALWAYS return a new
filename.
"""
# Search known suffixes to replace ('_crj.fits',...)
_extlist = copy.deepcopy(EXTLIST)
# Also, add a default where '_dth.fits' replaces
# whatever extension was there ('.fits','.c1h',...)
#_extlist.append('.')
# Also append any user-specified extensions...
if extlist:
_extlist += extlist
if isinstance(filename, fits.HDUList):
try:
filename = filename.filename()
except:
raise ValueError("Can't determine the filename of an waivered HDUList object.")
for suffix in _extlist:
_indx = filename.find(suffix)
if _indx > 0: break
if _indx < 0:
# default to entire rootname
_indx = len(filename)
if extn is None:
extn = ''
return filename[:_indx] + extn
|
Build rootname for a new file.
Use 'extn' for new filename if given, does NOT append a suffix/extension at
all.
Does NOT check to see if it exists already. Will ALWAYS return a new
filename.
|
entailment
|
def buildRootname(filename, ext=None):
"""
Build a new rootname for an existing file and given extension.
Any user supplied extensions to use for searching for file need to be
provided as a list of extensions.
Examples
--------
::
>>> rootname = buildRootname(filename, ext=['_dth.fits']) # doctest: +SKIP
"""
if filename in ['' ,' ', None]:
return None
fpath, fname = os.path.split(filename)
if ext is not None and '_' in ext[0]:
froot = os.path.splitext(fname)[0].split('_')[0]
else:
froot = fname
if fpath in ['', ' ', None]:
fpath = os.curdir
# Get complete list of filenames from current directory
flist = os.listdir(fpath)
#First, assume given filename is complete and verify
# it exists...
rootname = None
for name in flist:
if name == froot:
rootname = froot
break
elif name == froot + '.fits':
rootname = froot + '.fits'
break
# If we have an incomplete filename, try building a default
# name and seeing if it exists...
#
# Set up default list of suffix/extensions to add to rootname
_extlist = []
for extn in EXTLIST:
_extlist.append(extn)
if rootname is None:
# Add any user-specified extension to list of extensions...
if ext is not None:
for i in ext:
_extlist.insert(0,i)
# loop over all extensions looking for a filename that matches...
for extn in _extlist:
# Start by looking for filename with exactly
# the same case as provided in the ASN table...
rname = froot + extn
for name in flist:
if rname == name:
rootname = name
break
if rootname is None:
# Try looking for all lower-case filename
# instead of a mixed-case filename as required
# by the pipeline.
rname = froot.lower() + extn
for name in flist:
if rname == name:
rootname = name
break
if rootname is not None:
break
# If we still haven't found the file, see if we have the
# info to build one...
if rootname is None and ext is not None:
# Check to see if we have a full filename to start with...
_indx = froot.find('.')
if _indx > 0:
rootname = froot[:_indx] + ext[0]
else:
rootname = froot + ext[0]
if fpath not in ['.', '', ' ', None]:
rootname = os.path.join(fpath, rootname)
# It will be up to the calling routine to verify
# that a valid rootname, rather than 'None', was returned.
return rootname
|
Build a new rootname for an existing file and given extension.
Any user supplied extensions to use for searching for file need to be
provided as a list of extensions.
Examples
--------
::
>>> rootname = buildRootname(filename, ext=['_dth.fits']) # doctest: +SKIP
|
entailment
|
def getKeyword(filename, keyword, default=None, handle=None):
"""
General, write-safe method for returning a keyword value from the header of
an IRAF-recognized image.
Returns the value as a string.
"""
# Insure that there is at least 1 extension specified...
if filename.find('[') < 0:
filename += '[0]'
_fname, _extn = parseFilename(filename)
if not handle:
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname)
else:
# Use what the user provides, after insuring
# that it is a proper PyFITS object.
if isinstance(handle, fits.HDUList):
_fimg = handle
else:
raise ValueError('Handle must be %r object!' % fits.HDUList)
# Address the correct header
_hdr = getExtn(_fimg, _extn).header
try:
value = _hdr[keyword]
except KeyError:
_nextn = findKeywordExtn(_fimg, keyword)
try:
value = _fimg[_nextn].header[keyword]
except KeyError:
value = ''
if not handle:
_fimg.close()
del _fimg
if value == '':
if default is None:
value = None
else:
value = default
# NOTE: Need to clean up the keyword.. Occasionally the keyword value
# goes right up to the "/" FITS delimiter, and iraf.keypar is incapable
# of realizing this, so it incorporates "/" along with the keyword value.
# For example, after running "pydrizzle" on the image "j8e601bkq_flt.fits",
# the CD keywords look like this:
#
# CD1_1 = 9.221627430999639E-06/ partial of first axis coordinate w.r.t. x
# CD1_2 = -1.0346992614799E-05 / partial of first axis coordinate w.r.t. y
#
# so for CD1_1, iraf.keypar returns:
# "9.221627430999639E-06/"
#
# So, the following piece of code CHECKS for this and FIXES the string,
# very simply by removing the last character if it is a "/".
# This fix courtesy of Anton Koekemoer, 2002.
elif isinstance(value, string_types):
if value[-1:] == '/':
value = value[:-1]
return value
|
General, write-safe method for returning a keyword value from the header of
an IRAF-recognized image.
Returns the value as a string.
|
entailment
|
def getHeader(filename, handle=None):
"""
Return a copy of the PRIMARY header, along with any group/extension header
for this filename specification.
"""
_fname, _extn = parseFilename(filename)
# Allow the user to provide an already opened PyFITS object
# to derive the header from...
#
if not handle:
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname, mode='readonly')
else:
# Use what the user provides, after insuring
# that it is a proper PyFITS object.
if isinstance(handle, fits.HDUList):
_fimg = handle
else:
raise ValueError('Handle must be a %r object!' % fits.HDUList)
_hdr = _fimg['PRIMARY'].header.copy()
# if the data is not in the primary array delete NAXIS
# so that the correct value is read from the extension header
if _hdr['NAXIS'] == 0:
del _hdr['NAXIS']
if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)):
# Append correct extension/chip/group header to PRIMARY...
#for _card in getExtn(_fimg,_extn).header.ascard:
#_hdr.ascard.append(_card)
for _card in getExtn(_fimg, _extn).header.cards:
_hdr.append(_card)
if not handle:
# Close file handle now...
_fimg.close()
del _fimg
return _hdr
|
Return a copy of the PRIMARY header, along with any group/extension header
for this filename specification.
|
entailment
|
def updateKeyword(filename, key, value, show=yes):
"""Add/update keyword to header with given value."""
_fname, _extn = parseFilename(filename)
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname, mode='update')
# Address the correct header
_hdr = getExtn(_fimg, _extn).header
# Assign a new value or add new keyword here.
try:
_hdr[key] = value
except KeyError:
if show:
print('Adding new keyword ', key, '=', value)
_hdr[key] = value
# Close image
_fimg.close()
del _fimg
|
Add/update keyword to header with given value.
|
entailment
|
def buildFITSName(geisname):
"""Build a new FITS filename for a GEIS input image."""
# User wants to make a FITS copy and update it...
_indx = geisname.rfind('.')
_fitsname = geisname[:_indx] + '_' + geisname[_indx + 1:-1] + 'h.fits'
return _fitsname
|
Build a new FITS filename for a GEIS input image.
|
entailment
|
def openImage(filename, mode='readonly', memmap=False, writefits=True,
clobber=True, fitsname=None):
"""
Opens file and returns PyFITS object. Works on both FITS and GEIS
formatted images.
Notes
-----
If a GEIS or waivered FITS image is used as input, it will convert it to a
MEF object and only if ``writefits = True`` will write it out to a file. If
``fitsname = None``, the name used to write out the new MEF file will be
created using `buildFITSName`.
Parameters
----------
filename: str
name of input file
mode: str
mode for opening file based on PyFITS `mode` parameter values
memmap: bool
switch for using memory mapping, `False` for no, `True` for yes
writefits: bool
if `True`, will write out GEIS as multi-extension FITS
and return handle to that opened GEIS-derived MEF file
clobber: bool
overwrite previously written out GEIS-derived MEF file
fitsname: str
name to use for GEIS-derived MEF file,
if None and writefits==`True`, will use 'buildFITSName()' to generate one
"""
if not isinstance(filename, fits.HDUList):
# Insure that the filename is always fully expanded
# This will not affect filenames without paths or
# filenames specified with extensions.
filename = osfn(filename)
# Extract the rootname and extension specification
# from input image name
_fname, _iextn = parseFilename(filename)
else:
_fname = filename
# Check whether we have a FITS file and if so what type
isfits, fitstype = isFits(_fname)
if isfits:
if fitstype != 'waiver':
# Open the FITS file
fimg = fits.open(_fname, mode=mode, memmap=memmap)
return fimg
else:
fimg = convertwaiveredfits.convertwaiveredfits(_fname)
#check for the existence of a data quality file
_dqname = buildNewRootname(_fname, extn='_c1f.fits')
dqexists = os.path.exists(_dqname)
if dqexists:
try:
dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
dqfitsname = buildNewRootname(_dqname, extn='_c1h.fits')
except:
print("Could not read data quality file %s" % _dqname)
if writefits:
# User wants to make a FITS copy and update it
# using the filename they have provided
if fitsname is None:
rname = buildNewRootname(_fname)
fitsname = buildNewRootname(rname, extn='_c0h.fits')
# Write out GEIS image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out WAIVERED as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out WAIVERED as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
# Now close input GEIS image, and open writable
# handle to output FITS image instead...
fimg.close()
del fimg
# Image re-written as MEF, now it needs its WCS updated
#updatewcs.updatewcs(fitsname)
fimg = fits.open(fitsname, mode=mode, memmap=memmap)
# Return handle for use by user
return fimg
else:
# Input was specified as a GEIS image, but no FITS copy
# exists. Read it in with 'readgeis' and make a copy
# then open the FITS copy...
try:
# Open as a GEIS image for reading only
fimg = readgeis.readgeis(_fname)
except:
raise IOError("Could not open GEIS input: %s" % _fname)
#check for the existence of a data quality file
_dqname = buildNewRootname(_fname, extn='.c1h')
dqexists = os.path.exists(_dqname)
if dqexists:
try:
dqfile = readgeis.readgeis(_dqname)
dqfitsname = buildFITSName(_dqname)
except:
print("Could not read data quality file %s" % _dqname)
# Check to see if user wanted to update GEIS header.
# or write out a multi-extension FITS file and return a handle to it
if writefits:
# User wants to make a FITS copy and update it
# using the filename they have provided
if fitsname is None:
fitsname = buildFITSName(_fname)
# Write out GEIS image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out GEIS as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out GEIS as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
# Now close input GEIS image, and open writable
# handle to output FITS image instead...
fimg.close()
del fimg
# Image re-written as MEF, now it needs its WCS updated
#updatewcs.updatewcs(fitsname)
fimg = fits.open(fitsname, mode=mode, memmap=memmap)
# Return handle for use by user
return fimg
|
Opens file and returns PyFITS object. Works on both FITS and GEIS
formatted images.
Notes
-----
If a GEIS or waivered FITS image is used as input, it will convert it to a
MEF object and only if ``writefits = True`` will write it out to a file. If
``fitsname = None``, the name used to write out the new MEF file will be
created using `buildFITSName`.
Parameters
----------
filename: str
name of input file
mode: str
mode for opening file based on PyFITS `mode` parameter values
memmap: bool
switch for using memory mapping, `False` for no, `True` for yes
writefits: bool
if `True`, will write out GEIS as multi-extension FITS
and return handle to that opened GEIS-derived MEF file
clobber: bool
overwrite previously written out GEIS-derived MEF file
fitsname: str
name to use for GEIS-derived MEF file,
if None and writefits==`True`, will use 'buildFITSName()' to generate one
|
entailment
|
def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
_extn = filename[_indx + 1:-1]
else:
_fname = filename
_extn = None
return _fname, _extn
|
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
|
entailment
|
def parseExtn(extn=None):
"""
Parse a string representing a qualified fits extension name as in the
output of `parseFilename` and return a tuple ``(str(extname),
int(extver))``, which can be passed to `astropy.io.fits` functions using
the 'ext' kw.
Default return is the first extension in a fits file.
Examples
--------
::
>>> parseExtn('sci, 2')
('sci', 2)
>>> parseExtn('2')
('', 2)
>>> parseExtn('sci')
('sci', 1)
"""
if not extn:
return ('', 0)
try:
lext = extn.split(',')
except:
return ('', 1)
if len(lext) == 1 and lext[0].isdigit():
return ("", int(lext[0]))
elif len(lext) == 2:
return (lext[0], int(lext[1]))
else:
return (lext[0], 1)
|
Parse a string representing a qualified fits extension name as in the
output of `parseFilename` and return a tuple ``(str(extname),
int(extver))``, which can be passed to `astropy.io.fits` functions using
the 'ext' kw.
Default return is the first extension in a fits file.
Examples
--------
::
>>> parseExtn('sci, 2')
('sci', 2)
>>> parseExtn('2')
('', 2)
>>> parseExtn('sci')
('sci', 1)
|
entailment
|
def countExtn(fimg, extname='SCI'):
"""
Return the number of 'extname' extensions, defaulting to counting the
number of SCI extensions.
"""
closefits = False
if isinstance(fimg, string_types):
fimg = fits.open(fimg)
closefits = True
n = 0
for e in fimg:
if 'extname' in e.header and e.header['extname'] == extname:
n += 1
if closefits:
fimg.close()
return n
|
Return the number of 'extname' extensions, defaulting to counting the
number of SCI extensions.
|
entailment
|
def getExtn(fimg, extn=None):
"""
Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception.
"""
# If no extension is provided, search for first extension
# in FITS file with data associated with it.
if extn is None:
# Set up default to point to PRIMARY extension.
_extn = fimg[0]
# then look for first extension with data.
for _e in fimg:
if _e.data is not None:
_extn = _e
break
else:
# An extension was provided, so parse it out...
if repr(extn).find(',') > 1:
if isinstance(extn, tuple):
# We have a tuple possibly created by parseExtn(), so
# turn it into a list for easier manipulation.
_extns = list(extn)
if '' in _extns:
_extns.remove('')
else:
_extns = extn.split(',')
# Two values given for extension:
# for example, 'sci,1' or 'dq,1'
try:
_extn = fimg[_extns[0], int(_extns[1])]
except KeyError:
_extn = None
for e in fimg:
hdr = e.header
if ('extname' in hdr and
hdr['extname'].lower() == _extns[0].lower() and
hdr['extver'] == int(_extns[1])):
_extn = e
break
elif repr(extn).find('/') > 1:
# We are working with GEIS group syntax
_indx = str(extn[:extn.find('/')])
_extn = fimg[int(_indx)]
elif isinstance(extn, string_types):
if extn.strip() == '':
_extn = None # force error since invalid name was provided
# Only one extension value specified...
elif extn.isdigit():
# We only have an extension number specified as a string...
_nextn = int(extn)
else:
# We only have EXTNAME specified...
_nextn = None
if extn.lower() == 'primary':
_nextn = 0
else:
i = 0
for hdu in fimg:
isimg = 'extname' in hdu.header
hdr = hdu.header
if isimg and extn.lower() == hdr['extname'].lower():
_nextn = i
break
i += 1
if _nextn is not None and _nextn < len(fimg):
_extn = fimg[_nextn]
else:
_extn = None
else:
# Only integer extension number given, or default of 0 is used.
if int(extn) < len(fimg):
_extn = fimg[int(extn)]
else:
_extn = None
if _extn is None:
raise KeyError('Extension %s not found' % extn)
return _extn
|
Returns the PyFITS extension corresponding to extension specified in
filename.
Defaults to returning the first extension with data or the primary
extension, if none have data. If a non-existent extension has been
specified, it raises a `KeyError` exception.
|
entailment
|
def findFile(input):
"""Search a directory for full filename with optional path."""
# If no input name is provided, default to returning 'no'(FALSE)
if not input:
return no
# We use 'osfn' here to insure that any IRAF variables are
# expanded out before splitting out the path...
_fdir, _fname = os.path.split(osfn(input))
if _fdir == '':
_fdir = os.curdir
try:
flist = os.listdir(_fdir)
except OSError:
# handle the case when the requested file is on a disconnected network store
return no
_root, _extn = parseFilename(_fname)
found = no
for name in flist:
if name == _root:
# Check to see if given extension, if any, exists
if _extn is None:
found = yes
continue
else:
_split = _extn.split(',')
_extnum = None
_extver = None
if _split[0].isdigit():
_extname = None
_extnum = int(_split[0])
else:
_extname = _split[0]
if len(_split) > 1:
_extver = int(_split[1])
else:
_extver = 1
f = openImage(_root)
f.close()
if _extnum is not None:
if _extnum < len(f):
found = yes
del f
continue
else:
del f
else:
_fext = findExtname(f, _extname, extver=_extver)
if _fext is not None:
found = yes
del f
continue
return found
|
Search a directory for full filename with optional path.
|
entailment
|
def checkFileExists(filename, directory=None):
"""
Checks to see if file specified exists in current or specified directory.
Default is current directory. Returns 1 if it exists, 0 if not found.
"""
if directory is not None:
fname = os.path.join(directory,filename)
else:
fname = filename
_exist = os.path.exists(fname)
return _exist
|
Checks to see if file specified exists in current or specified directory.
Default is current directory. Returns 1 if it exists, 0 if not found.
|
entailment
|
def copyFile(input, output, replace=None):
"""Copy a file whole from input to output."""
_found = findFile(output)
if not _found or (_found and replace):
shutil.copy2(input, output)
|
Copy a file whole from input to output.
|
entailment
|
def removeFile(inlist):
"""
Utility function for deleting a list of files or a single file.
This function will automatically delete both files of a GEIS image, just
like 'iraf.imdelete'.
"""
if not isinstance(inlist, string_types):
# We do have a list, so delete all filenames in list.
# Treat like a list of full filenames
_ldir = os.listdir('.')
for f in inlist:
# Now, check to see if there are wildcards which need to be expanded
if f.find('*') >= 0 or f.find('?') >= 0:
# We have a wild card specification
regpatt = f.replace('?', '.?')
regpatt = regpatt.replace('*', '.*')
_reg = re.compile(regpatt)
for file in _ldir:
if _reg.match(file):
_remove(file)
else:
# This is just a single filename
_remove(f)
else:
# It must be a string then, so treat as a single filename
_remove(inlist)
|
Utility function for deleting a list of files or a single file.
This function will automatically delete both files of a GEIS image, just
like 'iraf.imdelete'.
|
entailment
|
def findKeywordExtn(ft, keyword, value=None):
"""
This function will return the index of the extension in a multi-extension
FITS file which contains the desired keyword with the given value.
"""
i = 0
extnum = -1
# Search through all the extensions in the FITS object
for chip in ft:
hdr = chip.header
# Check to make sure the extension has the given keyword
if keyword in hdr:
if value is not None:
# If it does, then does the value match the desired value
# MUST use 'str.strip' to match against any input string!
if hdr[keyword].strip() == value:
extnum = i
break
else:
extnum = i
break
i += 1
# Return the index of the extension which contained the
# desired EXTNAME value.
return extnum
|
This function will return the index of the extension in a multi-extension
FITS file which contains the desired keyword with the given value.
|
entailment
|
def findExtname(fimg, extname, extver=None):
"""
Returns the list number of the extension corresponding to EXTNAME given.
"""
i = 0
extnum = None
for chip in fimg:
hdr = chip.header
if 'EXTNAME' in hdr:
if hdr['EXTNAME'].strip() == extname.upper():
if extver is None or hdr['EXTVER'] == extver:
extnum = i
break
i += 1
return extnum
|
Returns the list number of the extension corresponding to EXTNAME given.
|
entailment
|
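A quick usage sketch for the two extension-lookup helpers above, assuming 'fits' is astropy.io.fits and that both functions are in scope (the extension names are made up):
from astropy.io import fits
import numpy as np

hdul = fits.HDUList([fits.PrimaryHDU(),
                     fits.ImageHDU(data=np.zeros((2, 2)), name='SCI')])
hdul[1].header['EXTVER'] = 1
# both helpers report the list index of the matching extension (1 here)
assert findExtname(hdul, 'sci', extver=1) == 1
assert findKeywordExtn(hdul, 'EXTNAME', 'SCI') == 1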
def rAsciiLine(ifile):
"""Returns the next non-blank line in an ASCII file."""
_line = ifile.readline().strip()
while len(_line) == 0:
_line = ifile.readline().strip()
return _line
|
Returns the next non-blank line in an ASCII file.
|
entailment
|
def listVars(prefix="", equals="\t= ", **kw):
"""List IRAF variables."""
keylist = getVarList()
if len(keylist) == 0:
print('No IRAF variables defined')
else:
keylist.sort()
for word in keylist:
print("%s%s%s%s" % (prefix, word, equals, envget(word)))
|
List IRAF variables.
|
entailment
|
def untranslateName(s):
"""Undo Python conversion of CL parameter or variable name."""
s = s.replace('DOT', '.')
s = s.replace('DOLLAR', '$')
# delete 'PY' at start of name components
if s[:2] == 'PY': s = s[2:]
s = s.replace('.PY', '.')
return s
|
Undo Python conversion of CL parameter or variable name.
|
entailment
|
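Two tiny checks of the name un-mangling above (a sketch assuming untranslateName is in scope):
assert untranslateName('PYlambda') == 'lambda'
assert untranslateName('sigmaDOTclip') == 'sigma.clip'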
def envget(var, default=None):
"""Get value of IRAF or OS environment variable."""
if 'pyraf' in sys.modules:
#ONLY if pyraf is already loaded, import iraf into the namespace
from pyraf import iraf
else:
# else set iraf to None so it knows to not use iraf's environment
iraf = None
try:
if iraf:
return iraf.envget(var)
else:
raise KeyError
except KeyError:
try:
return _varDict[var]
except KeyError:
try:
return os.environ[var]
except KeyError:
if default is not None:
return default
elif var == 'TERM':
# Return a default value for TERM
# TERM gets caught as it is found in the default
# login.cl file setup by IRAF.
print("Using default TERM value for session.")
return 'xterm'
else:
raise KeyError("Undefined environment variable `%s'" % var)
|
Get value of IRAF or OS environment variable.
|
entailment
|
def osfn(filename):
"""Convert IRAF virtual path name to OS pathname."""
# Try to emulate the CL version closely:
#
# - expands IRAF virtual file names
# - strips blanks around path components
# - if no slashes or relative paths, return relative pathname
# - otherwise return absolute pathname
if filename is None:
return filename
ename = Expand(filename)
dlist = [part.strip() for part in ename.split(os.sep)]
if len(dlist) == 1 and dlist[0] not in [os.curdir, os.pardir]:
return dlist[0]
# I use str.join instead of os.path.join here because
# os.path.join("","") returns "" instead of "/"
epath = os.sep.join(dlist)
fname = os.path.abspath(epath)
# append '/' if relative directory was at end or filename ends with '/'
if fname[-1] != os.sep and dlist[-1] in ['', os.curdir, os.pardir]:
fname = fname + os.sep
return fname
|
Convert IRAF virtual path name to OS pathname.
|
entailment
|
def defvar(varname):
"""Returns true if CL variable is defined."""
if 'pyraf' in sys.modules:
#ONLY if pyraf is already loaded, import iraf into the namespace
from pyraf import iraf
else:
# else set iraf to None so it knows to not use iraf's environment
iraf = None
if iraf:
_irafdef = iraf.envget(varname)
else:
_irafdef = 0
return varname in _varDict or varname in os.environ or _irafdef
|
Returns true if CL variable is defined.
|
entailment
|
def set(*args, **kw):
"""Set IRAF environment variables."""
if len(args) == 0:
if len(kw) != 0:
# normal case is only keyword,value pairs
for keyword, value in kw.items():
keyword = untranslateName(keyword)
svalue = str(value)
_varDict[keyword] = svalue
else:
# set with no arguments lists all variables (using same format
# as IRAF)
listVars(prefix=" ", equals="=")
else:
# The only other case allowed is the peculiar syntax
# 'set @filename', which only gets used in the zzsetenv.def file,
# where it reads extern.pkg. That file also gets read (in full cl
# mode) by clpackage.cl. I get errors if I read this during
# zzsetenv.def, so just ignore it here...
#
# Flag any other syntax as an error.
if (len(args) != 1 or len(kw) != 0 or
not isinstance(args[0], string_types) or args[0][:1] != '@'):
raise SyntaxError("set requires name=value pairs")
|
Set IRAF environment variables.
|
entailment
|
def show(*args, **kw):
"""Print value of IRAF or OS environment variables."""
if len(kw):
raise TypeError('unexpected keyword argument: %r' % list(kw))
if args:
for arg in args:
print(envget(arg))
else:
# print them all
listVars(prefix=" ", equals="=")
|
Print value of IRAF or OS environment variables.
|
entailment
|
def unset(*args, **kw):
"""
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
"""
if len(kw) != 0:
raise SyntaxError("unset requires a list of variable names")
for arg in args:
if arg in _varDict:
del _varDict[arg]
|
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
|
entailment
|
def Expand(instring, noerror=0):
"""
Expand a string with embedded IRAF variables (IRAF virtual filename).
Allows comma-separated lists. Also uses os.path.expanduser to replace '~'
symbols.
Set the noerror flag to silently replace undefined variables with just the
variable name or null (so Expand('abc$def') = 'abcdef' and
Expand('(abc)def') = 'def'). This is the IRAF behavior, though it is
confusing and hides errors.
"""
# call _expand1 for each entry in comma-separated list
wordlist = instring.split(",")
outlist = []
for word in wordlist:
outlist.append(os.path.expanduser(_expand1(word, noerror=noerror)))
return ",".join(outlist)
|
Expand a string with embedded IRAF variables (IRAF virtual filename).
Allows comma-separated lists. Also uses os.path.expanduser to replace '~'
symbols.
Set the noerror flag to silently replace undefined variables with just the
variable name or null (so Expand('abc$def') = 'abcdef' and
Expand('(abc)def') = 'def'). This is the IRAF behavior, though it is
confusing and hides errors.
|
entailment
|
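A small usage sketch tying set() and Expand() together; it assumes these helpers share the module-level _varDict and variable-matching regexes used above, and the path is invented:
set(rawdir='/data/project/raw/')
print(Expand('rawdir$night1/frame001.fits'))
# expected output, given the definitions above: /data/project/raw/night1/frame001.fits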
def _expand1(instring, noerror):
"""Expand a string with embedded IRAF variables (IRAF virtual filename)."""
# first expand names in parentheses
# note this works on nested names too, expanding from the
# inside out (just like IRAF)
mm = __re_var_paren.search(instring)
while mm is not None:
# remove embedded dollar signs from name
varname = mm.group('varname').replace('$','')
if defvar(varname):
varname = envget(varname)
elif noerror:
varname = ""
else:
raise ValueError("Undefined variable `%s' in string `%s'" %
(varname, instring))
instring = instring[:mm.start()] + varname + instring[mm.end():]
mm = __re_var_paren.search(instring)
# now expand variable name at start of string
mm = __re_var_match.match(instring)
if mm is None:
return instring
varname = mm.group('varname')
if varname in ['', ' ', None]:
mm = __re_var_match2.match(instring)
varname = mm.group('varname')
if defvar(varname):
# recursively expand string after substitution
return _expand1(envget(varname) + instring[mm.end():], noerror)
elif noerror:
return _expand1(varname + instring[mm.end():], noerror)
else:
raise ValueError("Undefined variable `%s' in string `%s'" %
(varname, instring))
|
Expand a string with embedded IRAF variables (IRAF virtual filename).
|
entailment
|
def legal_date(year, month, day):
'''Check if this is a legal date in the Julian calendar'''
daysinmonth = month_length(year, month)
if not (0 < day <= daysinmonth):
raise ValueError("Month {} doesn't have a day {}".format(month, day))
return True
|
Check if this is a legal date in the Julian calendar
|
entailment
|
def from_jd(jd):
'''Calculate Julian calendar date from Julian day'''
jd += 0.5
z = trunc(jd)
a = z
b = a + 1524
c = trunc((b - 122.1) / 365.25)
d = trunc(365.25 * c)
e = trunc((b - d) / 30.6001)
if e < 14:
month = e - 1
else:
month = e - 13
if month > 2:
year = c - 4716
else:
year = c - 4715
day = b - d - trunc(30.6001 * e)
return (year, month, day)
|
Calculate Julian calendar date from Julian day
|
entailment
|
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (trunc((365.25 * (year + 4716))) + trunc((30.6001 * (month + 1))) + day) - 1524.5
|
Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)
|
entailment
|
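A round-trip sanity check for the two converters above (a sketch assuming trunc is math.trunc and the module's month_length helper is available to legal_date):
jd = to_jd(1066, 10, 14)            # a Julian-calendar date
assert jd == 2110700.5
assert from_jd(jd) == (1066, 10, 14)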
def delay_1(year):
'''Test for delay of start of new year and to avoid Sunday, Wednesday,
and Friday as start of the new year.'''
months = trunc(((235 * year) - 234) / 19)
parts = 12084 + (13753 * months)
day = trunc((months * 29) + parts / 25920)
if ((3 * (day + 1)) % 7) < 3:
day += 1
return day
|
Test for delay of start of new year and to avoid Sunday, Wednesday, and Friday as start of the new year.
|
entailment
|
def delay_2(year):
'''Check for delay in start of new year due to length of adjacent years'''
last = delay_1(year - 1)
present = delay_1(year)
next_ = delay_1(year + 1)
if next_ - present == 356:
return 2
elif present - last == 382:
return 1
else:
return 0
|
Check for delay in start of new year due to length of adjacent years
|
entailment
|
def month_days(year, month):
'''How many days are in a given month of a given year'''
if month > 13:
raise ValueError("Incorrect month index")
# First of all, dispose of fixed-length 29 day months
if month in (IYYAR, TAMMUZ, ELUL, TEVETH, VEADAR):
return 29
# If it's not a leap year, Adar has 29 days
if month == ADAR and not leap(year):
return 29
# If it's Heshvan, days depend on length of year
if month == HESHVAN and (year_days(year) % 10) != 5:
return 29
# Similarly, Kislev varies with the length of year
if month == KISLEV and (year_days(year) % 10) == 3:
return 29
# Nope, it's a 30 day month
return 30
|
How many days are in a given month of a given year
|
entailment
|
def byteswap(input,output=None,clobber=True):
"""Input GEIS files "input" will be read and converted to a new GEIS file
whose byte-order has been swapped from its original state.
Parameters
----------
input - str
Full filename with path of input GEIS image header file
output - str
Full filename with path of output GEIS image header file
If None, a default name will be created as input_swap.??h
clobber - bool
Overwrite any pre-existing output file? [Default: True]
Notes
-----
This function will automatically read and write out the data file using the
GEIS image naming conventions.
"""
global dat
cardLen = fits.Card.length
# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
raise "Illegal input GEIS file name %s" % input
data_file = input[:-1]+'d'
# Create default output name if no output name was specified by the user
if output is None:
output = input.replace('.','_swap.')
out_data = output[:-1]+'d'
if os.path.exists(output) and not clobber:
errstr = 'Output file already exists! Please remove or rename and start again...'
raise IOError(errstr)
_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
bytes_per_line = cardLen+1
else:
raise "Platform %s is not supported (yet)." % _os
end_card = 'END'+' '* (cardLen-3)
# open input file
im = open(input)
# Generate the primary HDU so we can have access to keywords which describe
# the number of groups and shape of each group's array
#
cards = []
while 1:
line = im.read(bytes_per_line)[:cardLen]
line = line[:8].upper() + line[8:]
if line == end_card:
break
cards.append(fits.Card.fromstring(line))
phdr = fits.Header(cards)
im.close()
_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
_bitpix = -_bitpix
if _naxis0 > 0:
size = reduce(lambda x,y:x*y, _naxis[1:])
data_size = abs(_bitpix) * size // 8
else:
data_size = 0
group_size = data_size + _psize // 8
# decode the group parameter definitions,
# group parameters will become extension header
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']
formats = []
bools = []
floats = []
_range = list(range(1, pcount+1))
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
['PDTYPE'+str(j) for j in _range] + \
['PSIZE'+str(j) for j in _range] + \
['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
ptype = key[i-1]
pdtype = phdr['PDTYPE'+str(i)]
star = pdtype.find('*')
_type = pdtype[:star]
_bytes = pdtype[star+1:]
# collect boolean keywords since they need special attention later
if _type == 'LOGICAL':
bools.append(i)
if pdtype == 'REAL*4':
floats.append(i)
fmt = geis_fmt[_type] + _bytes
formats.append((ptype,fmt))
_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)
if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
_uint16 = 1
_bzero = 32768
else:
_uint16 = 0
# Use copy-on-write for all data types since byteswap may be needed
# in some platforms.
f1 = open(data_file, mode='rb')
dat = f1.read()
f1.close()
errormsg = ""
loc = 0
outdat = b''
for k in range(gcount):
ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
ext_dat = ext_dat.reshape(_shape).byteswap()
outdat += ext_dat.tostring()
ext_hdu = fits.hdu.ImageHDU(data=ext_dat)
rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats).byteswap()
outdat += rec.tostring()
loc += group_size
if os.path.exists(output):
os.remove(output)
if os.path.exists(out_data):
os.remove(out_data)
shutil.copy(input,output)
outfile = open(out_data,mode='wb')
outfile.write(outdat)
outfile.close()
print('Finished byte-swapping ',input,' to ',output)
#-------------------------------------------------------------------------------
"""Input GEIS files "input" will be read and a HDUList object will
be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
The user can use the writeto method to write the HDUList object to
a FITS file.
"""
# global dat # !!! (looks like this is a function missing its head)
cardLen = fits.Card.length
# input file(s) must be of the form *.??h and *.??d
if input[-1] != 'h' or input[-4] != '.':
raise "Illegal input GEIS file name %s" % input
data_file = input[:-1]+'d'
_os = sys.platform
if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
bytes_per_line = cardLen+1
else:
raise "Platform %s is not supported (yet)." % _os
end_card = 'END'+' '* (cardLen-3)
# open input file
im = open(input)
# Generate the primary HDU
cards = []
while 1:
line = im.read(bytes_per_line)[:cardLen]
line = line[:8].upper() + line[8:]
if line == end_card:
break
cards.append(fits.Card.fromstring(line))
phdr = fits.Header(cards)
im.close()
phdr.set('FILENAME', value=input, after='DATE')
# Determine starting point for adding Group Parameter Block keywords to Primary header
phdr_indx = phdr.index('PSIZE')
_naxis0 = phdr.get('NAXIS', 0)
_naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
_naxis.insert(0, _naxis0)
_bitpix = phdr['BITPIX']
_psize = phdr['PSIZE']
if phdr['DATATYPE'][:4] == 'REAL':
_bitpix = -_bitpix
if _naxis0 > 0:
size = reduce(lambda x,y:x*y, _naxis[1:])
data_size = abs(_bitpix) * size // 8
else:
data_size = 0
group_size = data_size + _psize // 8
# decode the group parameter definitions,
# group parameters will become extension table
groups = phdr['GROUPS']
gcount = phdr['GCOUNT']
pcount = phdr['PCOUNT']
formats = []
bools = []
floats = []
cols = [] # column definitions used for extension table
cols_dict = {} # provides name access to Column defs
_range = list(range(1, pcount+1))
key = [phdr['PTYPE'+str(j)] for j in _range]
comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
# delete group parameter definition header keywords
_list = ['PTYPE'+str(j) for j in _range] + \
['PDTYPE'+str(j) for j in _range] + \
['PSIZE'+str(j) for j in _range] + \
['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
# Construct record array formats for the group parameters
# as interpreted from the Primary header file
for i in range(1, pcount+1):
ptype = key[i-1]
pdtype = phdr['PDTYPE'+str(i)]
star = pdtype.find('*')
_type = pdtype[:star]
_bytes = pdtype[star+1:]
# collect boolean keywords since they need special attention later
if _type == 'LOGICAL':
bools.append(i)
if pdtype == 'REAL*4':
floats.append(i)
# identify keywords which require conversion to special units
if ptype in kw_DOUBLE:
_type = 'DOUBLE'
fmt = geis_fmt[_type] + _bytes
formats.append((ptype,fmt))
# Set up definitions for use in creating the group-parameter block table
nrpt = ''
nbits = str(int(_bytes)*8)
if 'CHAR' in _type:
nrpt = _bytes
nbits = _bytes
afmt = cols_fmt[_type]+ nbits
if 'LOGICAL' in _type:
afmt = cols_fmt[_type]
cfmt = cols_pfmt[_type]+nrpt
#print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
cols.append(cols_dict[ptype]) # This keeps the columns in order
_shape = _naxis[1:]
_shape.reverse()
_code = fits.BITPIX2DTYPE[_bitpix]
_bscale = phdr.get('BSCALE', 1)
_bzero = phdr.get('BZERO', 0)
if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
_uint16 = 1
_bzero = 32768
else:
_uint16 = 0
# delete from the end, so it will not conflict with previous delete
for i in range(len(phdr)-1, -1, -1):
if phdr.cards[i].keyword in _list:
del phdr[i]
# clean up other primary header keywords
phdr['SIMPLE'] = True
phdr['GROUPS'] = False
_after = 'NAXIS'
if _naxis0 > 0:
_after += str(_naxis0)
phdr.set('EXTEND', value=True,
comment="FITS dataset may contain extensions",
after=_after)
# Use copy-on-write for all data types since byteswap may be needed
# in some platforms.
f1 = open(data_file, mode='rb')
dat = f1.read()
errormsg = ""
# Define data array for all groups
arr_shape = _naxis[:]
arr_shape[0] = gcount
arr_stack = numpy.zeros(arr_shape,dtype=_code)
loc = 0
for k in range(gcount):
ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
ext_dat = ext_dat.reshape(_shape)
if _uint16:
ext_dat += _bzero
# Check to see whether there are any NaN's or infs which might indicate
# a byte-swapping problem, such as being written out on little-endian
# and being read in on big-endian or vice-versa.
if _code.find('float') >= 0 and \
(numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had floating point data values =\n"
errormsg += "= of NaN and/or Inf. =\n"
errormsg += "===================================\n"
elif _code.find('int') >= 0:
# Check INT data for max values
ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
if ext_dat_exp.max() == int(_bitpix) - 1:
# Potential problems with byteswapping
errormsg += "===================================\n"
errormsg += "= WARNING: =\n"
errormsg += "= Input image: =\n"
errormsg += input+"[%d]\n"%(k+1)
errormsg += "= had integer data values =\n"
errormsg += "= with maximum bitvalues. =\n"
errormsg += "===================================\n"
arr_stack[k] = ext_dat
rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
loc += group_size
# Add data from this GPB to table
for i in range(1, pcount+1):
val = rec[0][i-1]
if i in bools:
if val:
val = 'T'
else:
val = 'F'
cols[i-1].array[k] = val
# Based on the first group, add GPB keywords to PRIMARY header
if k == 0:
# Create separate PyFITS Card objects for each entry in 'rec'
# and update Primary HDU with these keywords after PSIZE
for i in range(1, pcount+1):
#val = rec.field(i-1)[0]
val = rec[0][i-1]
if val.dtype.kind == 'S':
val = val.decode('ascii')
if i in bools:
if val:
val = True
else:
val = False
if i in floats:
# use fromstring, format in Card is deprecated in pyfits 0.9
_str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
_card = fits.Card.fromstring(_str)
else:
_card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
phdr.insert(phdr_indx+i, _card)
# deal with bscale/bzero
if (_bscale != 1 or _bzero != 0):
phdr['BSCALE'] = _bscale
phdr['BZERO'] = _bzero
#hdulist.append(ext_hdu)
# Define new table based on Column definitions
ext_table = fits.TableHDU.from_columns(cols)
ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
# Add column descriptions to header of table extension to match stwfits output
for i in range(len(key)):
ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))
if errormsg != "":
errormsg += "===================================\n"
errormsg += "= This file may have been =\n"
errormsg += "= written out on a platform =\n"
errormsg += "= with a different byte-order. =\n"
errormsg += "= =\n"
errormsg += "= Please verify that the values =\n"
errormsg += "= are correct or apply the =\n"
errormsg += "= '.byteswap()' method. =\n"
errormsg += "===================================\n"
print(errormsg)
f1.close()
hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
hdulist.append(ext_table)
return hdulist
|
Input GEIS files "input" will be read and converted to a new GEIS file
whose byte-order has been swapped from its original state.
Parameters
----------
input - str
Full filename with path of input GEIS image header file
output - str
Full filename with path of output GEIS image header file
If None, a default name will be created as input_swap.??h
clobber - bool
Overwrite any pre-existing output file? [Default: True]
Notes
-----
This function will automatically read and write out the data file using the
GEIS image naming conventions.
|
entailment
|
def start(self, measurementId, durationInSeconds=None):
"""
Initialises the device if required then enters a read loop taking data from the provider and passing it to the
handler. It will continue until either breakRead is true or the duration (if provided) has passed.
:return:
"""
logger.info(">> measurement " + measurementId +
((" for " + str(durationInSeconds)) if durationInSeconds is not None else " until break"))
self.failureCode = None
self.measurementOverflowed = False
self.dataHandler.start(measurementId)
self.breakRead = False
self.startTime = time.time()
self.doInit()
# this must follow doInit because doInit sets status to INITIALISED
self.status = RecordingDeviceStatus.RECORDING
elapsedTime = 0
try:
self._sampleIdx = 0
while True:
logger.debug(measurementId + " provideData ")
self.dataHandler.handle(self.provideData())
elapsedTime = time.time() - self.startTime
if self.breakRead or durationInSeconds is not None and elapsedTime > durationInSeconds:
logger.debug(measurementId + " breaking provideData")
self.startTime = 0
break
except Exception:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = str(sys.exc_info())
logger.exception(measurementId + " failed")
finally:
expectedSamples = self.fs * (durationInSeconds if durationInSeconds is not None else elapsedTime)
if self._sampleIdx < expectedSamples:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = "Insufficient samples " + str(self._sampleIdx) + " for " + \
str(elapsedTime) + " second long run, expected " + str(expectedSamples)
self._sampleIdx = 0
if self.measurementOverflowed:
self.status = RecordingDeviceStatus.FAILED
self.failureCode = "Measurement overflow detected"
if self.status == RecordingDeviceStatus.FAILED:
logger.error("<< measurement " + measurementId + " - FAILED - " + self.failureCode)
else:
self.status = RecordingDeviceStatus.INITIALISED
logger.info("<< measurement " + measurementId + " - " + self.status.name)
self.dataHandler.stop(measurementId, self.failureCode)
if self.status == RecordingDeviceStatus.FAILED:
logger.warning("Reinitialising device after measurement failure")
self.doInit()
|
Initialises the device if required then enters a read loop taking data from the provider and passing it to the
handler. It will continue until either breakRead is true or the duration (if provided) has passed.
:return:
|
entailment
|
def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if result[1] == 404:
return result
else:
return {'name': targetId, 'data': self._jsonify(result)}, 200
else:
return None, 404
else:
return None, 500
|
Yields the analysed wav data.
:param targetId:
:return:
|
entailment
|
def put(self, targetId):
"""
stores a new target.
:param targetId: the target to store.
:return:
"""
json = request.get_json()
if 'hinge' in json:
logger.info('Storing target ' + targetId)
if self._targetController.storeFromHinge(targetId, json['hinge']):
logger.info('Stored target ' + targetId)
return None, 200
else:
return None, 500
else:
return None, 400
|
stores a new target.
:param targetId: the target to store.
:return:
|
entailment
|
def to_datetime(jdc):
'''Return a datetime for the input floating point Julian Day Count'''
year, month, day = gregorian.from_jd(jdc)
# in jdc: 0.0 = noon, 0.5 = midnight
# the 0.5 changes it to 0.0 = midnight, 0.5 = noon
frac = (jdc + 0.5) % 1
hours = int(24 * frac)
mfrac = frac * 24 - hours
mins = int(60 * round(mfrac, 6))
sfrac = mfrac * 60 - mins
secs = int(60 * round(sfrac, 6))
msfrac = sfrac * 60 - secs
# down to ms, which are 1/1000 of a second
ms = int(1000 * round(msfrac, 6))
return datetime(year, month, day, int(hours), int(mins), int(secs), int(ms), tzinfo=utc)
|
Return a datetime for the input floating point Julian Day Count
|
entailment
|
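A quick check of the fractional-day arithmetic used in to_datetime, independent of the calendar conversion:
jdc = 2451544.5             # a Julian Day Count falling on a midnight
frac = (jdc + 0.5) % 1      # the 0.5 shift makes 0.0 mean midnight, 0.5 mean noon
assert int(24 * frac) == 0  # so this JDC converts to 00:00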
def dict_from_qs(qs):
''' Slightly introverted parser for lists of dot-notation nested fields
i.e. "period.di,period.fhr" => {"period": {"di": {}, "fhr": {}}}
'''
entries = qs.split(',') if qs.strip() else []
entries = [entry.strip() for entry in entries]
def _dict_from_qs(line, d):
if '.' in line:
key, value = line.split('.', 1)
d.setdefault(key, {})
return _dict_from_qs(value, d[key])
else:
d[line] = {}
def _default():
return defaultdict(_default)
d = defaultdict(_default)
for line in entries:
_dict_from_qs(line, d)
return d
|
Slightly introverted parser for lists of dot-notation nested fields
i.e. "period.di,period.fhr" => {"period": {"di": {}, "fhr": {}}}
|
entailment
|
def qs_from_dict(qsdict, prefix=""):
''' Same as dict_from_qs, but in reverse
i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
'''
prefix = prefix + '.' if prefix else ""
def descend(qsd):
for key, val in sorted(qsd.items()):
if val:
yield qs_from_dict(val, prefix + key)
else:
yield prefix + key
return ",".join(descend(qsdict))
|
Same as dict_from_qs, but in reverse
i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
|
entailment
|
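A round trip through the two helpers above (a sketch assuming both are in scope; dict_from_qs also needs 'from collections import defaultdict'):
fields = "period.di,period.fhr,owner"
tree = dict_from_qs(fields)
# tree behaves like {"period": {"di": {}, "fhr": {}}, "owner": {}}
assert qs_from_dict(tree) == "owner,period.di,period.fhr"   # keys come back sorted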
def dbcon(func):
"""Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.dbcon is None:
# set up connection
self.dbcon = sqlite3.connect(self.db)
self.dbcur = self.dbcon.cursor()
self.dbcur.execute(SQL_SENSOR_TABLE)
self.dbcur.execute(SQL_TMPO_TABLE)
# execute function
try:
result = func(*args, **kwargs)
except Exception as e:
# on exception, first close connection and then raise
self.dbcon.rollback()
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
raise e
else:
# commit everything and close connection
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
else:
result = func(*args, **kwargs)
return result
return wrapper
|
Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created.
|
entailment
|
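A minimal sketch of how the dbcon decorator is meant to be used: the decorated method's owner exposes db (an sqlite path) plus dbcon/dbcur slots that the wrapper fills in and tears down. The class name and SQL below are placeholders, not part of the original:
class Store:
    def __init__(self, path):
        self.db = path       # sqlite file opened by the wrapper
        self.dbcon = None    # set up / torn down by @dbcon
        self.dbcur = None

    @dbcon
    def count_sensors(self):
        # self.dbcur is guaranteed to exist here; commit/close happen in the wrapper
        return self.dbcur.execute("SELECT COUNT(*) FROM sensor").fetchone()[0]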
def add(self, sid, token):
"""
Add new sensor to the database
Parameters
----------
sid : str
SensorId
token : str
"""
try:
self.dbcur.execute(SQL_SENSOR_INS, (sid, token))
except sqlite3.IntegrityError: # sensor entry exists
pass
|
Add new sensor to the database
Parameters
----------
sid : str
SensorId
token : str
|
entailment
|
def remove(self, sid):
"""
Remove sensor from the database
Parameters
----------
sid : str
SensorID
"""
self.dbcur.execute(SQL_SENSOR_DEL, (sid,))
self.dbcur.execute(SQL_TMPO_DEL, (sid,))
|
Remove sensor from the database
Parameters
----------
sid : str
SensorID
|
entailment
|
def sync(self, *sids):
"""
Synchronise data
Parameters
----------
sids : list of str
SensorIDs to sync
Optional, leave empty to sync everything
"""
if sids == ():
sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
for sid in sids:
self.dbcur.execute(SQL_TMPO_LAST, (sid,))
last = self.dbcur.fetchone()
if last:
rid, lvl, bid, ext = last
self._clean(sid, rid, lvl, bid)
# prevent needless polling
if time.time() < bid + 256:
return
else:
rid, lvl, bid = 0, 0, 0
self._req_sync(sid, rid, lvl, bid)
|
Synchronise data
Parameters
----------
sids : list of str
SensorIDs to sync
Optional, leave empty to sync everything
|
entailment
|
def list(self, *sids):
"""
List all tmpo-blocks in the database
Parameters
----------
sids : list of str
SensorID's for which to list blocks
Optional, leave empty to get them all
Returns
-------
list[list[tuple]]
"""
if sids == ():
sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
slist = []
for sid in sids:
tlist = []
for tmpo in self.dbcur.execute(SQL_TMPO_ALL, (sid,)):
tlist.append(tmpo)
sid, rid, lvl, bid, ext, ctd, blk = tmpo
self._dprintf(DBG_TMPO_WRITE, ctd, sid, rid, lvl, bid, len(blk))
slist.append(tlist)
return slist
|
List all tmpo-blocks in the database
Parameters
----------
sids : list of str
SensorID's for which to list blocks
Optional, leave empty to get them all
Returns
-------
list[list[tuple]]
|
entailment
|
def series(self, sid, recycle_id=None, head=None, tail=None,
datetime=True):
"""
Create data Series
Parameters
----------
sid : str
recycle_id : optional
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.Series
"""
if head is None:
head = 0
else:
head = self._2epochs(head)
if tail is None:
tail = EPOCHS_MAX
else:
tail = self._2epochs(tail)
if recycle_id is None:
self.dbcur.execute(SQL_TMPO_RID_MAX, (sid,))
recycle_id = self.dbcur.fetchone()[0]
tlist = self.list(sid)[0]
srlist = []
for _sid, rid, lvl, bid, ext, ctd, blk in tlist:
if (recycle_id == rid
and head < self._blocktail(lvl, bid)
and tail >= bid):
srlist.append(self._blk2series(ext, blk, head, tail))
if len(srlist) > 0:
ts = pd.concat(srlist)
ts.name = sid
if datetime is True:
ts.index = pd.to_datetime(ts.index, unit="s", utc=True)
return ts
else:
return pd.Series([], name=sid)
|
Create data Series
Parameters
----------
sid : str
recycle_id : optional
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.Series
|
entailment
|
def dataframe(self, sids, head=0, tail=EPOCHS_MAX, datetime=True):
"""
Create data frame
Parameters
----------
sids : list[str]
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.DataFrame
"""
if head is None:
head = 0
else:
head = self._2epochs(head)
if tail is None:
tail = EPOCHS_MAX
else:
tail = self._2epochs(tail)
series = [self.series(sid, head=head, tail=tail, datetime=False)
for sid in sids]
df = pd.concat(series, axis=1)
if datetime is True:
df.index = pd.to_datetime(df.index, unit="s", utc=True)
return df
|
Create data frame
Parameters
----------
sids : list[str]
head : int | pandas.Timestamp, optional
Start of the interval
default earliest available
tail : int | pandas.Timestamp, optional
End of the interval
default max epoch
datetime : bool
convert index to datetime
default True
Returns
-------
pandas.DataFrame
|
entailment
|
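A hypothetical end-to-end flow over the methods above; the Session class name, sensor id and token are placeholders, not taken from this excerpt:
import pandas as pd

s = Session()                                   # object providing add/sync/dataframe
s.add("sensor-id-placeholder", "token-placeholder")
s.sync()
df = s.dataframe(["sensor-id-placeholder"],
                 head=pd.Timestamp("2022-01-01", tz="UTC"))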
def first_timestamp(self, sid, epoch=False):
"""
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
first_block = self.dbcur.execute(SQL_TMPO_FIRST, (sid,)).fetchone()
if first_block is None:
return None
timestamp = first_block[2]
if not epoch:
timestamp = pd.Timestamp.utcfromtimestamp(timestamp)
timestamp = timestamp.tz_localize('UTC')
return timestamp
|
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
|
entailment
|
def last_timestamp(self, sid, epoch=False):
"""
Get the theoretical last timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
timestamp, value = self.last_datapoint(sid, epoch)
return timestamp
|
Get the theoretical last timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
|
entailment
|
def last_datapoint(self, sid, epoch=False):
"""
Parameters
----------
sid : str
SensorId
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int, float
"""
block = self._last_block(sid)
if block is None:
return None, None
header = block['h']
timestamp, value = header['tail']
if not epoch:
timestamp = pd.Timestamp.utcfromtimestamp(timestamp)
timestamp = timestamp.tz_localize('UTC')
return timestamp, value
|
Parameters
----------
sid : str
SensorId
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int, float
|
entailment
|
def _npdelta(self, a, delta):
"""Numpy: Modifying Array Values
http://docs.scipy.org/doc/numpy/reference/arrays.nditer.html"""
for x in np.nditer(a, op_flags=["readwrite"]):
delta += x
x[...] = delta
return a
|
Numpy: Modifying Array Values
http://docs.scipy.org/doc/numpy/reference/arrays.nditer.html
|
entailment
|
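What _npdelta actually computes, shown on a tiny array: it rewrites the array in place into a running total starting from delta, i.e. delta + np.cumsum(a):
import numpy as np

a = np.array([1, 2, 3])
# calling the method with delta=10 would rewrite a in place to [11, 13, 16]
assert np.array_equal(10 + np.cumsum(a), [11, 13, 16])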
def sigStrToKwArgsDict(checkFuncSig):
""" Take a check function signature (string), and parse it to get a dict
of the keyword args and their values. """
p1 = checkFuncSig.find('(')
p2 = checkFuncSig.rfind(')')
assert p1 > 0 and p2 > 0 and p2 > p1, "Invalid signature: "+checkFuncSig
argParts = irafutils.csvSplit(checkFuncSig[p1+1:p2], ',', True)
argParts = [x.strip() for x in argParts]
retval = {}
for argPair in argParts:
argSpl = argPair.split('=', 1)
if len(argSpl) > 1:
if argSpl[0] in retval:
if isinstance(retval[argSpl[0]], (list,tuple)):
retval[argSpl[0]]+=(irafutils.stripQuotes(argSpl[1]),) # 3rd
else: # 2nd in, so convert to tuple
retval[argSpl[0]] = (retval[argSpl[0]],
irafutils.stripQuotes(argSpl[1]),)
else:
retval[argSpl[0]] = irafutils.stripQuotes(argSpl[1]) # 1st in
else:
retval[argSpl[0]] = None # eg. found "triggers=, max=6, ..."
return retval
|
Take a check function signature (string), and parse it to get a dict
of the keyword args and their values.
|
entailment
|
def separateKeywords(kwArgsDict):
""" Look through the keywords passed and separate the special ones we
have added from the legal/standard ones. Return both sets as two
dicts (in a tuple), as (standardKws, ourKws) """
standardKws = {}
ourKws = {}
for k in kwArgsDict:
if k in STANDARD_KEYS:
standardKws[k]=kwArgsDict[k]
else:
ourKws[k]=kwArgsDict[k]
return (standardKws, ourKws)
|
Look through the keywords passed and separate the special ones we
have added from the legal/standard ones. Return both sets as two
dicts (in a tuple), as (standardKws, ourKws)
|
entailment
|
def addKwdArgsToSig(sigStr, kwArgsDict):
""" Alter the passed function signature string to add the given kewords """
retval = sigStr
if len(kwArgsDict) > 0:
retval = retval.strip(' ,)') # open up the r.h.s. for more args
for k in kwArgsDict:
if retval[-1] != '(': retval += ", "
retval += str(k)+"="+str(kwArgsDict[k])
retval += ')'
return retval
|
Alter the passed function signature string to add the given keywords
|
entailment
|
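A small illustration of the signature-string helpers above (a sketch assuming they are in scope; exact quoting of values follows irafutils.stripQuotes):
sig = "checkRange(val, min=0)"
assert addKwdArgsToSig(sig, {"max": 10}) == "checkRange(val, min=0, max=10)"
# sigStrToKwArgsDict("checkRange(val, min=0, max=10)") yields roughly
# {'val': None, 'min': '0', 'max': '10'}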
def _gauss_funct(p, fjac=None, x=None, y=None, err=None,
weights=None):
"""
Defines the gaussian function to be used as the model.
"""
if p[2] != 0.0:
Z = (x - p[1]) / p[2]
model = p[0] * np.e ** (-Z ** 2 / 2.0)
else:
model = np.zeros(np.size(x))
status = 0
if weights is not None:
if err is not None:
print("Warning: Ignoring errors and using weights.\n")
return [status, (y - model) * weights]
elif err is not None:
return [status, (y - model) / err]
else:
return [status, y - model]
|
Defines the gaussian function to be used as the model.
|
entailment
|
def gfit1d(y, x=None, err=None, weights=None, par=None, parinfo=None,
maxiter=200, quiet=0):
"""
Return the gaussian fit as an object.
Parameters
----------
y: 1D Numpy array
The data to be fitted
x: 1D Numpy array
(optional) The x values of the y array. x and y must
have the same shape.
err: 1D Numpy array
(optional) 1D array with measurement errors, must be
the same shape as y
weights: 1D Numpy array
(optional) 1D array with weights, must be the same
shape as y
par: List
(optional) Starting values for the parameters to be fitted
parinfo: Dictionary of lists
(optional) provides additional information for the
parameters. For a detailed description see nmpfit.py.
Parinfo can be used to limit parameters or keep
some of them fixed.
maxiter: number
Maximum number of iterations to perform
Default: 200
quiet: number
if set to 1, nmpfit does not print to the screen
Default: 0
Examples
--------
>>> x = np.arange(10,20, 0.1)
>>> y= 10*np.e**(-(x-15)**2/4)
>>> print(gfit1d(y,x=x, maxiter=20,quiet=1).params)
[10. 15. 1.41421356]
"""
y = y.astype(np.float)
if weights is not None:
weights = weights.astype(np.float)
if err is not None:
err = err.astype(np.float)
if x is None and len(y.shape) == 1:
x = np.arange(len(y)).astype(np.float)
if x.shape != y.shape:
print("input arrays X and Y must be of equal shape.\n")
return
fa = {'x': x, 'y': y, 'err': err, 'weights': weights}
if par is not None:
p = par
else:
ysigma = y.std()
ind = np.nonzero(y > ysigma)[0]
if len(ind) != 0:
xind = int(ind.mean())
p2 = x[xind]
p1 = y[xind]
p3 = 1.0
else:
ymax = y.max()
ymin = y.min()
ymean= y.mean()
if (ymax - ymean) > (abs(ymin - ymean)):
p1 = ymax
else: p1 = ymin
ind = (np.nonzero(y == p1))[0]
p2 = x.mean()
p3 = 1.
p = [p1, p2, p3]
m = nmpfit.mpfit(_gauss_funct, p,parinfo = parinfo, functkw=fa,
maxiter=maxiter, quiet=quiet)
if (m.status <= 0): print('error message = ', m.errmsg)
return m
|
Return the gaussian fit as an object.
Parameters
----------
y: 1D Numpy array
The data to be fitted
x: 1D Numpy array
(optional) The x values of the y array. x and y must
have the same shape.
err: 1D Numpy array
(optional) 1D array with measurement errors, must be
the same shape as y
weights: 1D Numpy array
(optional) 1D array with weights, must be the same
shape as y
par: List
(optional) Starting values for the parameters to be fitted
parinfo: Dictionary of lists
(optional) provides additional information for the
parameters. For a detailed description see nmpfit.py.
Parinfo can be used to limit parameters or keep
some of them fixed.
maxiter: number
Maximum number of iterations to perform
Default: 200
quiet: number
if set to 1, nmpfit does not print to the screen
Default: 0
Examples
--------
>>> x = np.arange(10,20, 0.1)
>>> y= 10*np.e**(-(x-15)**2/4)
>>> print(gfit1d(y,x=x, maxiter=20,quiet=1).params)
[10. 15. 1.41421356]
|
entailment
|
def filter(self, *args, **kwargs):
"""filter lets django managers use `objects.filter` on a hashable object."""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
return super().filter(*args, **kwargs)
|
filter lets django managers use `objects.filter` on a hashable object.
|
entailment
|
def _extract_model_params(self, defaults, **kwargs):
"""this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
"""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
lookup, params = super()._extract_model_params(defaults, **kwargs)
if obj is not None:
params[self.object_property_name] = obj
del params['object_hash']
return lookup, params
|
this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
|
entailment
|
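A hedged sketch of what these manager overrides enable, assuming a model (called Estimator here) that uses this manager with object_property_name set to 'estimator':
payload = {"weights": [0.1, 0.2]}                 # any picklable object works
obj, created = Estimator.objects.get_or_create(estimator=payload)   # hashed transparently
same = Estimator.objects.filter(estimator=payload).first()          # matched via object_hash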
def persist(self):
"""a private method that persists an estimator object to the filesystem"""
if self.object_hash:
data = dill.dumps(self.object_property)
f = ContentFile(data)
self.object_file.save(self.object_hash, f, save=False)
f.close()
self._persisted = True
return self._persisted
|
a private method that persists an estimator object to the filesystem
|
entailment
|
def load(self):
"""a private method that loads an estimator object from the filesystem"""
if self.is_file_persisted:
self.object_file.open()
temp = dill.loads(self.object_file.read())
self.set_object(temp)
self.object_file.close()
|
a private method that loads an estimator object from the filesystem
|
entailment
|
def create_from_file(cls, filename):
"""Return an Estimator object given the path of the file, relative to the MEDIA_ROOT"""
obj = cls()
obj.object_file = filename
obj.load()
return obj
|
Return an Estimator object given the path of the file, relative to the MEDIA_ROOT
|
entailment
|
def getAppDir():
""" Return our application dir. Create it if it doesn't exist. """
# Be sure the resource dir exists
theDir = os.path.expanduser('~/.')+APP_NAME.lower()
if not os.path.exists(theDir):
try:
os.mkdir(theDir)
except OSError:
print('Could not create "'+theDir+'" to save GUI settings.')
theDir = "./"+APP_NAME.lower()
return theDir
|
Return our application dir. Create it if it doesn't exist.
|
entailment
|