text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restoreWCS(self,prepend=None):
    """ Resets the WCS values to the original values stored in the backup
    keywords recorded in self.backup.

    Parameters
    ----------
    prepend : str, optional
        Prefix used when the backup keywords were written; overrides
        ``self.prepend`` when given.
    """
    # Open header for image
    image = self.rootname
    # Resolve which prepend string to use: explicit argument wins,
    # then the instance attribute, else none.
    if prepend: _prepend = prepend
    elif self.prepend: _prepend = self.prepend
    else: _prepend = None
    # Open image as writable FITS object
    fimg = fileutil.openImage(image, mode='update')
    # extract the extension ID being updated
    _root,_iextn = fileutil.parseFilename(self.rootname)
    _extn = fileutil.getExtn(fimg,_iextn)
    if len(self.backup) > 0:
        # If it knows about the backup keywords already,
        # use this to restore the original values to the original keywords
        # NOTE(review): loop iterates self.revert while guarding on
        # len(self.backup) -- presumably the two mappings are kept in
        # sync by the code that creates the backups; confirm.
        for newkey in self.revert.keys():
            if newkey != 'opscale':
                _orig_key = self.revert[newkey]
                _extn.header[_orig_key] = _extn.header[newkey]
    elif _prepend:
        for key in self.wcstrans.keys():
            # Get new keyword name based on old keyname
            # and prepend string
            if key != 'pixel scale':
                _okey = self._buildNewKeyname(key,_prepend)
                if _okey in _extn.header:
                    _extn.header[key] = _extn.header[_okey]
                else:
                    print('No original WCS values found. Exiting...')
                    break
    else:
        print('No original WCS values found. Exiting...')
    # close the FITS object so the updated header is flushed to disk
    fimg.close()
    del fimg
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createReferenceWCS(self,refname,overwrite=yes):
    """ Write out the values of the WCS keywords to the NEW
    specified image 'fitsname'.

    Parameters
    ----------
    refname : str
        Filename of the reference FITS file to create or append to.
    overwrite : bool
        When true, any existing file is deleted and recreated; otherwise
        the WCS HDU is appended to the existing file (unless an extension
        named 'WCS' is already present).
    """
    hdu = self.createWcsHDU()
    # If refname already exists, delete it to make way for new file
    if os.path.exists(refname):
        if overwrite==yes:
            # Remove previous version and re-create with new header
            os.remove(refname)
            hdu.writeto(refname)
        else:
            # Append header to existing file
            wcs_append = True
            oldhdu = fits.open(refname, mode='append')
            # only append if no 'WCS' extension exists yet
            for e in oldhdu:
                if 'extname' in e.header and e.header['extname'] == 'WCS':
                    wcs_append = False
            if wcs_append == True:
                oldhdu.append(hdu)
            oldhdu.close()
            del oldhdu
    else:
        # No previous file, so generate new one from scratch
        hdu.writeto(refname)
    # Clean up
    del hdu
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_jd(year, month, day, method=None):
    '''Obtain Julian day from a given French Revolutionary calendar date.

    Parameters:
        year, month, day: French Revolutionary date.  The five or six
            "sansculottides" are treated as a thirteenth month.
        method: leap-year rule -- 'equinox' (default), 'continuous'/4,
            'romme'/100 or 'madler'/128.

    Raises:
        ValueError: if the month or day is out of range for this calendar.
    '''
    method = method or 'equinox'
    # Regular months have exactly 30 days.
    if day < 1 or day > 30:
        raise ValueError("Invalid day for this calendar")
    # Months run 1..12 plus the 13th "month" of sansculottides.
    # BUG FIX: previously only month > 13 was rejected, so month 0 or a
    # negative month was silently passed through to the converters.
    if month < 1 or month > 13:
        raise ValueError("Invalid month for this calendar")
    # Month 13 has only 5 days (6 in leap years).
    if month == 13 and day > 5 + leap(year, method=method):
        raise ValueError("Invalid day for this month in this calendar")
    if method == 'equinox':
        return _to_jd_equinox(year, month, day)
    else:
        return _to_jd_schematic(year, month, day, method)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_jd_schematic(year, month, day, method):
    '''Calculate JD using various leap-year calculation methods

    Decomposes the elapsed years into nested leap-year cycles
    (4000 / 400 / 100-or-128 / 4 years), sums their lengths in days,
    then adds the month and day offsets to the calendar EPOCH.
    '''
    # per-cycle day-count contributions, summed at the end
    y0, y1, y2, y3, y4, y5 = 0, 0, 0, 0, 0, 0
    intercal_cycle_yrs, over_cycle_yrs, leap_suppression_yrs = None, None, None
    # Use the every-four-years method below year 16 (madler) or below 15 (romme)
    if ((method in (100, 'romme') and year < 15) or
        (method in (128, 'madler') and year < 17)):
        method = 4
    if method in (4, 'continuous'):
        # Leap years: 15, 19, 23, ...
        y5 = -365
    elif method in (100, 'romme'):
        # Gregorian-like rule: leap every 4 yrs, suppressed every 100,
        # restored every 400, suppressed again every 4000
        year = year - 13
        y5 = DAYS_IN_YEAR * 12 + 3
        leap_suppression_yrs = 100.
        leap_suppression_days = 36524 # leap_cycle_days * 25 - 1
        intercal_cycle_yrs = 400.
        intercal_cycle_days = 146097 # leap_suppression_days * 4 + 1
        over_cycle_yrs = 4000.
        over_cycle_days = 1460969 # intercal_cycle_days * 10 - 1
    elif method in (128, 'madler'):
        # Madler rule: leap every 4 yrs, one leap suppressed every 128 yrs
        year = year - 17
        y5 = DAYS_IN_YEAR * 16 + 4
        leap_suppression_days = 46751 # 32 * leap_cycle_days - 1
        leap_suppression_yrs = 128
    else:
        raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")
    # count 4000-year over-cycles in days (romme only)
    if over_cycle_yrs:
        y0 = trunc(year / over_cycle_yrs) * over_cycle_days
        year = year % over_cycle_yrs
    # count intercalary cycles in days (400 years long or None)
    if intercal_cycle_yrs:
        y1 = trunc(year / intercal_cycle_yrs) * intercal_cycle_days
        year = year % intercal_cycle_yrs
    # count leap suppression cycles in days (100 or 128 years long)
    if leap_suppression_yrs:
        y2 = trunc(year / leap_suppression_yrs) * leap_suppression_days
        year = year % leap_suppression_yrs
    # ordinary 4-year leap cycles
    y3 = trunc(year / LEAP_CYCLE_YEARS) * LEAP_CYCLE_DAYS
    year = year % LEAP_CYCLE_YEARS
    # Adjust 'year' by one to account for lack of year 0
    y4 = year * DAYS_IN_YEAR
    yj = y0 + y1 + y2 + y3 + y4 + y5
    # every month is 30 days long in this calendar
    mj = (month - 1) * 30
    return EPOCH + yj + mj + day - 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_jd(jd, method=None):
    '''Calculate date in the French Revolutionary
    calendar from Julian day.  The five or six
    "sansculottides" are considered a thirteenth
    month in the results of this function.'''
    # Default to the astronomical (equinox-based) rule.
    if not method:
        method = 'equinox'
    if method != 'equinox':
        return _from_jd_schematic(jd, method)
    return _from_jd_equinox(jd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_jd_schematic(jd, method):
    '''Convert from JD using various leap-year calculation methods

    Inverse of _to_jd_schematic: strips whole leap-year cycles off the
    day count, largest cycle first, accumulating the year, then derives
    the month and day from the remainder.
    '''
    if jd < EPOCH:
        raise ValueError("Can't convert days before the French Revolution")
    # days since Epoch
    J = trunc(jd) + 0.5 - EPOCH
    # per-cycle year contributions, summed at the end
    y0, y1, y2, y3, y4, y5 = 0, 0, 0, 0, 0, 0
    intercal_cycle_days = leap_suppression_days = over_cycle_days = None
    # Use the every-four-years method below year 17
    if (J <= DAYS_IN_YEAR * 12 + 3 and
            method in (100, 'romme')) or (J <= DAYS_IN_YEAR * 17 + 4 and method in (128, 'madler')):
        method = 4
    # set p and r in Hatcher algorithm
    if method in (4, 'continuous'):
        # Leap years: 15, 19, 23, ...
        # Reorganize so that leap day is last day of cycle
        J = J + 365
        y5 = - 1
    elif method in (100, 'romme'):
        # Year 15 is not a leap year
        # Year 16 is leap, then multiples of 4, not multiples of 100, yes multiples of 400
        y5 = 12
        J = J - DAYS_IN_YEAR * 12 - 3
        leap_suppression_yrs = 100.
        leap_suppression_days = 36524 # LEAP_CYCLE_DAYS * 25 - 1
        intercal_cycle_yrs = 400.
        intercal_cycle_days = 146097 # leap_suppression_days * 4 + 1
        over_cycle_yrs = 4000.
        over_cycle_days = 1460969 # intercal_cycle_days * 10 - 1
    elif method in (128, 'madler'):
        # Year 15 is a leap year, then year 20 and multiples of 4, not multiples of 128
        y5 = 16
        J = J - DAYS_IN_YEAR * 16 - 4
        leap_suppression_yrs = 128
        leap_suppression_days = 46751 # 32 * leap_cycle_days - 1
    else:
        raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")
    # peel off 4000-year over-cycles (romme only)
    if over_cycle_days:
        y0 = trunc(J / over_cycle_days) * over_cycle_yrs
        J = J % over_cycle_days
    # peel off 400-year intercalary cycles
    if intercal_cycle_days:
        y1 = trunc(J / intercal_cycle_days) * intercal_cycle_yrs
        J = J % intercal_cycle_days
    # peel off 100- or 128-year leap-suppression cycles
    if leap_suppression_days:
        y2 = trunc(J / leap_suppression_days) * leap_suppression_yrs
        J = J % leap_suppression_days
    # ordinary 4-year leap cycles
    y3 = trunc(J / LEAP_CYCLE_DAYS) * LEAP_CYCLE_YEARS
    if J % LEAP_CYCLE_DAYS == LEAP_CYCLE_DAYS - 1:
        # last day of the 4-year cycle is the leap day itself
        J = 1460
    else:
        J = J % LEAP_CYCLE_DAYS
    # 0 <= J <= 1460
    # J needs to be 365 here on leap days ONLY
    y4 = trunc(J / DAYS_IN_YEAR)
    if J == DAYS_IN_YEAR * 4:
        y4 = y4 - 1
        J = 365.0
    else:
        J = J % DAYS_IN_YEAR
    year = y0 + y1 + y2 + y3 + y4 + y5
    # every month is 30 days long
    month = trunc(J / 30.)
    J = J - month * 30
    # convert 0-based year/month/day to 1-based
    return year + 1, month + 1, trunc(J) + 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_jd_equinox(jd):
    '''Calculate the FR day using the equinox as day 1'''
    # work from the (noon-boundary) start of the Julian day
    jd = trunc(jd) + 0.5
    # JD of the autumn equinox that begins the FR year containing jd
    equinoxe = premier_da_la_annee(jd)
    # FR year = Gregorian year of that equinox minus the epoch year
    an = gregorian.from_jd(equinoxe)[0] - YEAR_EPOCH
    # all months are 30 days; remainder within the month is the day
    mois = trunc((jd - equinoxe) / 30.) + 1
    jour = int((jd - equinoxe) % 30) + 1
    return (an, mois, jour)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_jd(year, month, day):
    '''Obtain Julian day for Indian Civil date'''
    # Saka year + 78 gives the Gregorian year in which the Saka year starts
    greg_year = year + 78
    is_leap = isleap(greg_year)
    # The civil year begins March 22, or March 21 in Gregorian leap years
    new_year_jd = gregorian.to_jd(greg_year, 3, 22 - is_leap)
    # First month (Caitra) has 31 days in leap years, otherwise 30
    caitra_days = 31 if is_leap else 30
    if month == 1:
        return new_year_jd + (day - 1)
    jd = new_year_jd + caitra_days
    # months 2..7 have 31 days each
    jd += min(month - 2, 5) * 31
    # months 8..12 have 30 days each
    if month >= 8:
        jd += (month - 7) * 30
    return jd + day - 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_jd(jd):
    '''Calculate Indian Civil date from Julian day.'''
    # Day-of-year offset between the Gregorian new year and the Saka
    # new year (the civil year starts around March 22)
    start = 80
    jd = trunc(jd) + 0.5
    greg = gregorian.from_jd(jd) # Gregorian date for Julian day
    leap = isleap(greg[0]) # Is this a leap year?
    # Tentative year in Saka era
    year = greg[0] - SAKA_EPOCH
    # JD at start of Gregorian year
    greg0 = gregorian.to_jd(greg[0], 1, 1)
    yday = jd - greg0 # Day number (0 based) in Gregorian year
    if leap:
        Caitra = 31 # Days in Caitra this year
    else:
        Caitra = 30
    if yday < start:
        # Day is at the end of the preceding Saka year: step the year
        # back and add that year's full length
        # (Caitra + five 31-day months + three 30-day months + 10 + start)
        year -= 1
        yday += Caitra + (31 * 5) + (30 * 3) + 10 + start
    yday -= start
    if yday < Caitra:
        # still within the first month
        month = 1
        day = yday + 1
    else:
        mday = yday - Caitra
        if (mday < (31 * 5)):
            # months 2..6 have 31 days each
            month = trunc(mday / 31) + 2
            day = (mday % 31) + 1
        else:
            # months 7..12 have 30 days each
            mday -= 31 * 5
            month = trunc(mday / 30) + 7
            day = (mday % 30) + 1
    return (year, month, int(day))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_stack(skip=0, length=6, _sep=os.path.sep):
    """ Returns a one-line string with the current callstack. """
    # walk up to `length` frames, starting above this function (+skip)
    frames = islice(frame_iterator(sys._getframe(1 + skip)), length)
    entries = []
    for frame in frames:
        # keep only the last two path components for brevity
        short_path = '/'.join(frame.f_code.co_filename.split(_sep)[-2:])
        entries.append("%s:%s:%s" % (short_path, frame.f_lineno,
                                     frame.f_code.co_name))
    return ' < '.join(entries)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, deviceId):
    """ lists all known active measurements. """
    # unknown devices simply have no measurements
    byName = self.measurements.get(deviceId)
    return [] if byName is None else list(byName.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, deviceId, measurementId):
    """ details the specific measurement. """
    # two-level lookup: device first, then measurement;
    # None when either level is absent
    byName = self.measurements.get(deviceId)
    if byName is None:
        return None
    return byName.get(measurementId)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clicked(self):
    """ Called when this button is clicked.  Execute code from .cfgspc.

    Looks up the action code attached to this parameter in the task's
    _RULES_ configspec section and executes it via teal.  Any exception
    is caught and reported through a popup and/or stdout.
    """
    try:
        from . import teal
    except ImportError:
        # teal is optional; without it we fall back to printing errors
        teal = None
    # Pre-define names used in the exception handler so a failure early
    # in the try block cannot trigger a NameError while reporting.
    tealGui = None
    code = ''
    try:
        # start drilling down into the tpo to get the code
        tealGui = self._mainGuiObj
        tealGui.showStatus('Clicked "'+self.getButtonLabel()+'"', keep=1)
        pscope = self.paramInfo.scope
        pname = self.paramInfo.name
        tpo = tealGui._taskParsObj
        tup = tpo.getExecuteStrings(pscope, pname)
        if not tup:
            if teal:
                teal.popUpErr(tealGui.top, "No action to perform",
                              "Action Button Error")
            return
        for exname in tup:
            if '_RULES_' in tpo and exname in tpo['_RULES_'].configspec:
                ruleSig = tpo['_RULES_'].configspec[exname]
                chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
                code = chkArgsDict.get('code') # a string or None
                # now go ahead and execute it
                teal.execEmbCode(pscope, pname, self.getButtonLabel(),
                                 tealGui, code)
        # done
        tealGui.debug('Finished: "'+self.getButtonLabel()+'"')
    except Exception as ex:
        # BUG FIX: Exception has no .message attribute in Python 3;
        # use str(ex) instead
        msg = 'Error executing: "'+self.getButtonLabel()+'"\n'+str(ex)
        msgFull = msg+'\n'+''.join(traceback.format_exc())
        # str() also guards against code being None here
        msgFull += "CODE:\n"+str(code)
        if tealGui:
            if teal: teal.popUpErr(tealGui.top, msg, "Action Button Error")
            tealGui.debug(msgFull)
        else:
            if teal: teal.popUpErr(None, msg, "Action Button Error")
            print(msgFull)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tobytes(s, encoding='ascii'):
    """ Convert string s to the 'bytes' type, in all Pythons, even back
    before Python 2.6.  What 'str' means varies by PY3K or not.  In
    Pythons before 3.0, this is technically the same as the str type in
    terms of the character data in memory. """
    # NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
    # NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
    if PY3K:
        # Python 3: encode unless it is already a bytes object
        return s if isinstance(s, bytes) else s.encode(encoding)
    # Python 2: str is already byte data; only unicode needs encoding
    if isinstance(s, unicode):
        return s.encode(encoding)
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tostr(s, encoding='ascii'):
    """ Convert string-like-thing s to the 'str' type, in all Pythons,
    even back before Python 2.6.  What 'str' means varies by PY3K or
    not.  In Pythons before 3.0, str and bytes are the same type.  In
    Python 3+, this may require a decoding step. """
    if PY3K:
        # str == unicode in PY3K; only bytes need decoding
        return s if isinstance(s, str) else s.decode(encoding)
    # Python 2: str and bytes are the same type; encode unicode to str
    if isinstance(s, unicode):
        return s.encode(encoding)
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retry(func=None, retries=5, backoff=None, exceptions=(IOError, OSError, EOFError), cleanup=None, sleep=time.sleep):
    """ Decorator that retries the call ``retries`` times if ``func``
    raises ``exceptions``.  Can use a ``backoff`` function to sleep till
    next retry.

    May be used bare (``@retry``) or with arguments
    (``@retry(retries=3, backoff=1)``).

    Parameters:
        func: the decorated function (None when used with arguments).
        retries: number of retries after the initial attempt.
        backoff: None (no sleep), a number of seconds, or a callable
            taking the attempt count and returning seconds to sleep.
        exceptions: exception types that trigger a retry; anything else
            propagates immediately.
        cleanup: optional callable invoked with the original arguments
            before each retry attempt.
        sleep: sleep function, injectable for testing.
    """
    @Aspect(bind=True)
    def retry_aspect(cutpoint, *args, **kwargs):
        # one initial attempt plus `retries` retries
        for count in range(retries + 1):
            try:
                if count and cleanup:
                    # a previous attempt failed: let the caller reset state
                    cleanup(*args, **kwargs)
                # `yield` proceeds with the wrapped call (aspectlib)
                yield
                break
            except exceptions as exc:
                if count == retries:
                    # out of retries: propagate the last error
                    raise
                if not backoff:
                    timeout = 0
                elif isinstance(backoff, (int, float)):
                    timeout = backoff
                else:
                    timeout = backoff(count)
                logger.exception("%s(%s, %s) raised exception %s. %s retries left. Sleeping %s secs.",
                                 cutpoint.__name__, args, kwargs, exc, retries - count, timeout)
                sleep(timeout)
    # bare decorator vs decorator factory usage
    return retry_aspect if func is None else retry_aspect(func)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eparOptionFactory(master, statusBar, param, defaultParam, doScroll, fieldWidths, plugIn=None, editedCallbackObj=None, helpCallbackObj=None, mainGuiObj=None, defaultsVerb="Default", bg=None, indent=False, flagging=False, flaggedColor=None):
    """Return EparOption item of appropriate type for the parameter param

    Selection order: an explicitly passed plugIn class wins; parameters
    with an enumerated choice list get EnumEparOption; otherwise the
    class is looked up by param.type, defaulting to StringEparOption.
    """
    # Allow passed-in overrides
    if plugIn is not None:
        eparOption = plugIn
    # If there is an enumerated list, regardless of datatype use EnumEparOption
    elif param.choice is not None:
        eparOption = EnumEparOption
    else:
        # Use String for types not in the dictionary
        eparOption = _eparOptionDict.get(param.type, StringEparOption)
    # Create it
    eo = eparOption(master, statusBar, param, defaultParam, doScroll,
                    fieldWidths, defaultsVerb, bg,
                    indent=indent, helpCallbackObj=helpCallbackObj,
                    mainGuiObj=mainGuiObj)
    # wire up the optional edit callback and visual flagging behavior
    eo.setEditedCallbackObj(editedCallbackObj)
    eo.setIsFlagging(flagging, False)
    if flaggedColor:
        eo.setFlaggedColor(flaggedColor)
    return eo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def popupChoices(self, event=None):
    """Popup right-click menu of special parameter operations

    Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled
    instance attributes to determine which items are available.
    """
    # don't bother if all items are disabled
    if NORMAL not in (self.browserEnabled, self.clearEnabled,
                      self.unlearnEnabled, self.helpEnabled):
        return
    # build the menu fresh each time so enabled states are current
    self.menu = Menu(self.entry, tearoff = 0)
    if self.browserEnabled != DISABLED:
        # Handle file and directory in different functions (tkFileDialog)
        if capable.OF_TKFD_IN_EPAR:
            self.menu.add_command(label = "File Browser",
                                  state = self.browserEnabled,
                                  command = self.fileBrowser)
            self.menu.add_command(label = "Directory Browser",
                                  state = self.browserEnabled,
                                  command = self.dirBrowser)
        # Handle file and directory in the same function (filedlg)
        else:
            self.menu.add_command(label = "File/Directory Browser",
                                  state = self.browserEnabled,
                                  command = self.fileBrowser)
        self.menu.add_separator()
    self.menu.add_command(label = "Clear",
                          state = self.clearEnabled,
                          command = self.clearEntry)
    self.menu.add_command(label = self.defaultsVerb,
                          state = self.unlearnEnabled,
                          command = self.unlearnValue)
    self.menu.add_command(label = 'Help',
                          state = self.helpEnabled,
                          command = self.helpOnParam)
    # Get the current y-coordinate of the Entry
    ycoord = self.entry.winfo_rooty()
    # Get the current x-coordinate of the cursor
    xcoord = self.entry.winfo_pointerx() - XSHIFT
    # Display the Menu as a popup as it is not associated with a Button
    self.menu.tk_popup(xcoord, ycoord)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fileBrowser(self):
    """Invoke a tkinter file dialog

    Uses the native tkFileDialog when available, otherwise falls back to
    the legacy filedlg module.  Stores the selected filename in
    self.choice; does nothing if the dialog is canceled.
    """
    if capable.OF_TKFD_IN_EPAR:
        fname = askopenfilename(parent=self.entry, title="Select File")
    else:
        from . import filedlg
        self.fd = filedlg.PersistLoadFileDialog(self.entry,
                                                "Select File", "*")
        # Show() != 1 means the user canceled the legacy dialog
        if self.fd.Show() != 1:
            self.fd.DialogCleanup()
            return
        fname = self.fd.GetFileName()
        self.fd.DialogCleanup()
    if not fname: return # canceled
    self.choice.set(fname)
    # don't select when we go back to widget to reduce risk of
    # accidentally typing over the filename
    self.lastSelection = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dirBrowser(self):
    """Invoke a tkinter directory dialog"""
    # popupChoices() should only route here when tkFileDialog is in use
    if not capable.OF_TKFD_IN_EPAR:
        raise NotImplementedError('Fix popupChoices() logic.')
    dname = askdirectory(parent=self.entry, title="Select Directory")
    if not dname:
        return # canceled
    self.choice.set(dname)
    # don't select when we go back to widget to reduce risk of
    # accidentally typing over the filename
    self.lastSelection = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forceValue(self, newVal, noteEdited=False):
    """Force-set a parameter entry to the given value"""
    # treat None as an empty entry
    value = "" if newVal is None else newVal
    self.choice.set(value)
    if noteEdited:
        # propagate the change notification without de-dup suppression
        self.widgetEdited(val=value, skipDups=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unlearnValue(self):
    """Unlearn a parameter value by setting it back to its default"""
    # fetch the stored default (as a plain string) and apply it
    self.choice.set(self.defaultParamInfo.get(field = "p_filename",
                                              native = 0, prompt = 0))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keypress(self, event):
    """Allow keys typed in widget to select items"""
    try:
        target = self.shortcuts[event.keysym]
    except KeyError:
        # key not found (probably a bug, since we intend to catch
        # only events from shortcut keys, but ignore it anyway)
        return
    self.choice.set(target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def postcmd(self):
    """Make sure proper entry is activated when menu is posted"""
    current = self.choice.get()
    try:
        idx = self.paramInfo.choice.index(current)
    except ValueError:
        # initial null value may not be in list
        return
    self.entry.menu.activate(idx)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convertToNative(self, aVal):
    """ Convert to native bool; interpret certain strings. """
    if aVal is None:
        return None
    if isinstance(aVal, bool):
        return aVal
    # anything else: compare its lowercase string form to truthy tokens
    truthy = ('1', 'on', 'yes', 'true')
    return str(aVal).lower() in truthy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toggle(self, event=None):
    """Toggle value between Yes and No"""
    # pick the radio button opposite to the current value and select it
    target = self.rbno if self.choice.get() == "yes" else self.rbyes
    target.select()
    self.widgetEdited()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def entryCheck(self, event = None, repair = True):
    """ Ensure any INDEF entry is uppercase, before base class behavior """
    upper = self.choice.get().upper()
    # normalize any case-variant of 'indef' to the canonical INDEF
    if upper.strip() == 'INDEF':
        self.choice.set(upper)
    # then defer to the base-class validation
    return EparOption.entryCheck(self, event, repair = repair)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setSampleSizeBytes(self):
""" updates the current record of the packet size per sample and the relationship between this and the fifo reads. """ |
self.sampleSizeBytes = self.getPacketSize()
if self.sampleSizeBytes > 0:
self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def easter(year):
    '''Calculate western easter'''
    # formula taken from http://aa.usno.navy.mil/faq/docs/easter.html
    century = trunc(year / 100)
    golden = year - 19 * trunc(year / 19)
    k = trunc((century - 17) / 25)
    # epact-like intermediate value, reduced modulo 30 then corrected
    i = century - trunc(century / 4) - trunc((century - k) / 3) + (19 * golden) + 15
    i -= 30 * trunc(i / 30)
    i -= trunc(i / 28) * (1 - trunc(i / 28) * trunc(29 / (i + 1)) * trunc((21 - golden) / 11))
    # weekday correction
    j = year + trunc(year / 4) + i + 2 - century + trunc(century / 4)
    j -= 7 * trunc(j / 7)
    offset = i - j
    month = 3 + trunc((offset + 40) / 44)
    day = offset + 28 - 31 * trunc(month / 4)
    return year, int(month), int(day)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(input, width=132, output=None, keep=False):
    """Convert an ASCII trailer file into a FITS trailer file.

    Input ASCII trailer file "input" will be read.  The contents will
    then be written out to a FITS file in the same format as used by
    'stwfits' from IRAF.  The input ASCII file is deleted after a
    successful conversion.

    Parameters
    ----------
    input : str
        Filename of input ASCII trailer file
    width : int
        Number of characters wide to use for defining output FITS column
        [Default: 132]
    output : str
        Filename to use for writing out converted FITS trailer file.
        If None, input filename will be converted from *.tra -> *_trl.fits
        [Default: None]
    keep : bool
        Specifies whether or not to keep any previously written FITS files
        [Default: False]

    Raises
    ------
    IOError
        If the output file already exists and ``keep`` is True.
    """
    # open input trailer file
    trl = open(input)
    # process all lines, wrapping any that exceed the column width
    lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)])
    # close ASCII trailer file now that we have processed all the lines
    trl.close()
    if output is None:
        # create fits file name: '<root>.tra' -> '<root>_trl.fits'
        rootname,suffix = os.path.splitext(input)
        s = suffix[1:].replace('ra','rl')
        fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep)
    else:
        fitsname = output
    full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname))
    old_file = os.path.exists(full_name)
    if old_file:
        if keep:
            print("ERROR: Trailer file already written out as: {}".format(full_name))
            raise IOError
        else:
            os.remove(full_name)
    # Build FITS table and write it out
    line_fmt = "{}A".format(width)
    tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)])
    tbhdu.writeto(fitsname)
    print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name))
    # the ASCII original is removed once the FITS version exists
    os.remove(input)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_extra_values(conf, _prepend=()):
    """ Find all the values and sections not in the configspec from a
    validated ConfigObj.

    Returns a list of two-element tuples: a tuple naming the section the
    value is in (empty tuple for the top level), and the name of the
    extra member.  Called on an unvalidated ConfigObj this returns an
    empty list.
    """
    extras = [(_prepend, name) for name in conf.extra_values]
    # recurse into known (non-extra) subsections, extending the path
    for section in conf.sections:
        if section in conf.extra_values:
            continue
        extras += get_extra_values(conf[section], _prepend + (section,))
    return extras
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch(self, key):
    """Helper function to fetch values from owning section.

    Returns a 2-tuple: the value, and the section where it was found.

    Search order: the owning section itself, then its 'DEFAULT'
    subsection, then the same pair on each parent up to the root.

    Raises MissingInterpolationOption if the key is found nowhere.
    """
    # switch off interpolation before we try and fetch anything !
    save_interp = self.section.main.interpolation
    self.section.main.interpolation = False
    # Start at section that "owns" this InterpolationEngine
    current_section = self.section
    while True:
        # try the current section first
        val = current_section.get(key)
        if val is not None and not isinstance(val, Section):
            break
        # try "DEFAULT" next
        val = current_section.get('DEFAULT', {}).get(key)
        if val is not None and not isinstance(val, Section):
            break
        # move up to parent and try again
        # top-level's parent is itself
        if current_section.parent is current_section:
            # reached top level, time to give up
            break
        current_section = current_section.parent
    # restore interpolation to previous value before returning
    self.section.main.interpolation = save_interp
    if val is None:
        raise MissingInterpolationOption(key)
    return val, current_section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dict(self):
    """ Return a deepcopy of self as a dictionary.

    All members that are ``Section`` instances are recursively turned
    into ordinary dictionaries - by calling their ``dict`` method.
    Lists and tuples are copied so the result shares no mutable state
    with the original.
    """
    result = {}
    for key in self:
        value = self[key]
        if isinstance(value, Section):
            # recurse: subsections become plain dicts
            value = value.dict()
        elif isinstance(value, list):
            # create a copy rather than a reference
            value = list(value)
        elif isinstance(value, tuple):
            # create a copy rather than a reference
            value = tuple(value)
        result[key] = value
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, indict):
    """ A recursive update - useful for merging config files.

    Nested dictionaries are merged member-by-member; any other value
    simply overwrites the existing entry.
    """
    for key in list(indict):
        val = indict[key]
        nested = (key in self and isinstance(self[key], dict)
                  and isinstance(val, dict))
        if nested:
            # both sides are sections: descend and merge
            self[key].merge(val)
        else:
            self[key] = val
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename(self, oldkey, newkey):
    """ Change a keyname to another, without changing position in
    sequence.

    Implemented so that transformations can be made on keys, as well as
    on values.  (used by encode and decode)  Also renames comments.
    """
    # work out which ordering list (scalars or sections) holds the key
    if oldkey in self.scalars:
        members = self.scalars
    elif oldkey in self.sections:
        members = self.sections
    else:
        raise KeyError('Key "%s" not found.' % oldkey)
    pos = members.index(oldkey)
    # re-key the underlying dict storage directly
    value = self[oldkey]
    dict.__delitem__(self, oldkey)
    dict.__setitem__(self, newkey, value)
    # keep the key at its original position in the ordering list
    members[pos] = newkey
    # carry both comment kinds over to the new keyname
    self.comments[newkey] = self.comments.pop(oldkey)
    self.inline_comments[newkey] = self.inline_comments.pop(oldkey)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs):
    """ Walk every member and call a function on the keyword and value.

    Return a dictionary of the return values.

    If the function raises an exception, raise the error unless
    ``raise_errors=False``, in which case set the return value to
    ``False``.

    Any unrecognised keyword arguments you pass to walk will be passed
    on to the function you pass in.

    Note: if ``call_on_sections`` is ``True`` then - on encountering a
    subsection - *first* the function is called for the *whole*
    subsection, and then it recurses into its members.  This means your
    function must be able to handle strings, dictionaries and lists.
    This allows you to change the key of subsections as well as of
    ordinary members.  The return value when called on the whole
    subsection has to be discarded.

    See the encode and decode methods for examples, including functions.

    .. caution:: You can use ``walk`` to transform the names of members
       of a section but you mustn't add or delete members.
    """
    out = {}
    # scalars first
    for i in range(len(self.scalars)):
        entry = self.scalars[i]
        try:
            val = function(self, entry, **keywargs)
            # bound again in case name has changed
            entry = self.scalars[i]
            out[entry] = val
        except Exception:
            if raise_errors:
                raise
            else:
                entry = self.scalars[i]
                out[entry] = False
    # then sections
    for i in range(len(self.sections)):
        entry = self.sections[i]
        if call_on_sections:
            try:
                function(self, entry, **keywargs)
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.sections[i]
                    out[entry] = False
            # bound again in case name has changed
            entry = self.sections[i]
        # previous result is discarded
        out[entry] = self[entry].walk(
            function,
            raise_errors=raise_errors,
            call_on_sections=call_on_sections,
            **keywargs)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_list(self, key):
    """ A convenience method which fetches the specified value,
    guaranteeing that the result is a list. """
    value = self[key]
    # scalars are wrapped; sequences are copied into a fresh list
    if not isinstance(value, (tuple, list)):
        return [value]
    return list(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore_defaults(self):
    """Recursively reset every member that has a known default value.

    Only meaningful for a ConfigObj created with a configspec and
    validated; members without defaults are left untouched.
    """
    for name in self.default_values:
        self.restore_default(name)
    for subsection in self.sections:
        self[subsection].restore_defaults()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_bom(self, infile):
    """Strip any byte-order mark from *infile*, decoding when necessary.

    If an encoding was specified explicitly it *must* be used, but a
    matching BOM is still removed (and ``self.BOM`` set).  If the wrong
    encoding was specified, a BOM for a different encoding will not be
    detected or removed.  Without an explicit encoding, UTF-8 and UTF-16
    BOMs are auto-detected: the BOM is removed, ``self.BOM`` and
    ``self.encoding`` are updated, and UTF-16 input is decoded.

    ``infile`` may be a single string or a list of lines; the result is
    always a list of lines.  Must not be called with an empty ``infile``.
    A wrongly specified encoding is likely to raise
    ``UnicodeDecodeError``.
    """
    if ((self.encoding is not None) and
        (self.encoding.lower() not in BOM_LIST)):
        # No need to check for a BOM
        # the encoding specified doesn't have one
        # just decode
        return self._decode(infile, self.encoding)
    if isinstance(infile, (list, tuple)):
        line = infile[0]
    else:
        line = infile
    if self.encoding is not None:
        # encoding explicitly supplied
        # And it could have an associated BOM
        # TODO: if encoding is just UTF16 - we ought to check for both
        # TODO: big endian and little endian versions.
        enc = BOM_LIST[self.encoding.lower()]
        if enc == 'utf_16':
            # For UTF16 we try big endian and little endian
            for BOM, (encoding, final_encoding) in list(BOMS.items()):
                if not final_encoding:
                    # skip UTF8
                    continue
                # NOTE(review): this tests ``infile`` rather than ``line``;
                # it would fail if infile is a list -- confirm callers pass
                # a single string for UTF-16 input.
                if infile.startswith(BOM):
                    ### BOM discovered
                    ##self.BOM = True
                    # Don't need to remove BOM
                    return self._decode(infile, encoding)
            # If we get this far, will *probably* raise a DecodeError
            # As it doesn't appear to start with a BOM
            return self._decode(infile, self.encoding)
        # Must be UTF8
        BOM = BOM_SET[enc]
        if not line.startswith(BOM):
            return self._decode(infile, self.encoding)
        # strip the BOM from the first line (or the whole string)
        newline = line[len(BOM):]
        # BOM removed
        if isinstance(infile, (list, tuple)):
            infile[0] = newline
        else:
            infile = newline
        self.BOM = True
        return self._decode(infile, self.encoding)
    # No encoding specified - so we need to check for UTF8/UTF16
    for BOM, (encoding, final_encoding) in list(BOMS.items()):
        if not isinstance(BOM, str) or not line.startswith(BOM):
            continue
        else:
            # BOM discovered
            self.encoding = final_encoding
            if not final_encoding:
                self.BOM = True
                # UTF8
                # remove BOM
                newline = line[len(BOM):]
                if isinstance(infile, (list, tuple)):
                    infile[0] = newline
                else:
                    infile = newline
                # UTF8 - don't decode
                if isinstance(infile, string_types):
                    return infile.splitlines(True)
                else:
                    return infile
            # UTF16 - have to decode
            return self._decode(infile, encoding)
    # No BOM discovered and no encoding specified, just return
    if isinstance(infile, string_types):
        # infile read from a file will be a single string
        return infile.splitlines(True)
    return infile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _decode(self, infile, encoding):
    """Decode *infile* to unicode using *encoding*.

    A single (byte) string is decoded and split into a list of lines;
    a list is decoded element-wise, in place, and returned.
    """
    if isinstance(infile, string_types):
        # can't be unicode
        # NOTE: Could raise a ``UnicodeDecodeError``
        # NOTE(review): on Python 3 ``str`` has no ``.decode`` -- presumably
        # this branch only ever sees byte strings on Python 2; confirm what
        # ``string_types`` contains.
        return infile.decode(encoding).splitlines(True)
    for i, line in enumerate(infile):
        # NOTE: The isinstance test here handles mixed lists of unicode/string
        # NOTE: But the decode will break on any non-string values
        # NOTE: Or could raise a ``UnicodeDecodeError``
        if PY3K:
            if not isinstance(line, str):
                infile[i] = line.decode(encoding)
        else:
            # ``unicode`` only exists on Python 2; guarded by PY3K above
            if not isinstance(line, unicode):
                infile[i] = line.decode(encoding)
    return infile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _decode_element(self, line):
"""Decode element to unicode if necessary.""" |
if not self.encoding:
return line
if isinstance(line, str) and self.default_encoding:
return line.decode(self.default_encoding)
return line |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_depth(self, sect, depth):
""" Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ |
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_error(self, text, ErrorClass, infile, cur_index):
""" Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ |
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _unquote(self, value):
"""Return an unquoted version of a value""" |
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _quote(self, value, multiline=True):
    """Return a safely quoted version of *value*.

    * Values that don't need quoting are returned unchanged.
    * Members of a list are quoted recursively (``multiline=False``)
      and joined with commas, obeying the empty (``,``) and
      single-member (``value,``) list syntax.
    * With ``multiline=True`` (the default) triple quotes are used when
      the value contains a newline, or both quote characters.
    * With ``list_values=False`` a value is only quoted if it contains
      a newline or ``#``.
    * With ``write_empty_values`` set, an empty string is not quoted.

    Raises ``ConfigObjError`` if the value cannot be quoted safely and
    ``TypeError`` for non-string values when ``stringify`` is off.
    """
    if multiline and self.write_empty_values and value == '':
        # Only if multiline is set, so that it is used for values not
        # keys, and not values that are part of a list
        return ''
    if multiline and isinstance(value, (list, tuple)):
        if not value:
            return ','
        elif len(value) == 1:
            return self._quote(value[0], multiline=False) + ','
        return ', '.join([self._quote(val, multiline=False)
                          for val in value])
    if not isinstance(value, string_types):
        if self.stringify:
            value = str(value)
        else:
            raise TypeError('Value "%s" is not a string.' % value)
    if not value:
        return '""'
    # classify the value to choose between no quotes, single-char
    # quoting and triple quoting
    no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
    need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
    hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
    check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
    if check_for_single:
        if not self.list_values:
            # we don't quote if ``list_values=False``
            quot = noquot
        # for normal values either single or double quotes will do
        elif '\n' in value:
            # will only happen if multiline is off - e.g. '\n' in key
            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
        elif ((value[0] not in wspace_plus) and
              (value[-1] not in wspace_plus) and
              (',' not in value)):
            # no leading/trailing whitespace-or-quote and no comma:
            # safe to leave unquoted
            quot = noquot
        else:
            quot = self._get_single_quote(value)
    else:
        # if value has '\n' or "'" *and* '"', it will need triple quotes
        quot = self._get_triple_quote(value)
    if quot == noquot and '#' in value and self.list_values:
        # a '#' would be read back as an inline comment - force quoting
        quot = self._get_single_quote(value)
    return quot % value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _multiline(self, value, infile, cur_index, maxline):
    """Extract a triple-quoted value that may span multiple lines.

    *value* is the text of the first line, starting with the opening
    triple quote; further lines are consumed from *infile* until the
    closing quote is found.  Returns ``(value, comment, last_index)``
    (a list in the single-line case, a tuple otherwise).  Raises
    ``SyntaxError`` on a stray embedded triple quote, a badly formed
    closing line, or when the input ends before the value is closed.
    """
    quot = value[:3]
    newvalue = value[3:]
    # per-quote-style regexes: one for "opens and closes on the same
    # line", one for the closing line of a multi-line value
    single_line = self._triple_quote[quot][0]
    multi_line = self._triple_quote[quot][1]
    mat = single_line.match(value)
    if mat is not None:
        retval = list(mat.groups())
        retval.append(cur_index)
        return retval
    elif newvalue.find(quot) != -1:
        # somehow the triple quote is missing
        raise SyntaxError()
    #
    while cur_index < maxline:
        cur_index += 1
        newvalue += '\n'
        line = infile[cur_index]
        if line.find(quot) == -1:
            newvalue += line
        else:
            # end of multiline, process it
            break
    else:
        # we've got to the end of the config, oops...
        raise SyntaxError()
    mat = multi_line.match(line)
    if mat is None:
        # a badly formed line
        raise SyntaxError()
    (value, comment) = mat.groups()
    return (newvalue + value, comment, cur_index)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_configspec(self, configspec):
    """Parse *configspec* (if needed) and attach it to this ConfigObj."""
    # FIXME: Should we check that the configspec was created with the
    # correct settings ? (i.e. ``list_values=False``)
    if isinstance(configspec, ConfigObj):
        self.configspec = configspec
        return
    try:
        parsed = ConfigObj(configspec,
                           raise_errors=True,
                           file_error=True,
                           _inspec=True)
    except ConfigObjError as e:
        # FIXME: Should these errors have a reference
        # to the already parsed ConfigObj ?
        raise ConfigspecError('Parsing configspec failed: %s' % e)
    except IOError as e:
        raise IOError('Reading configspec failed: %s' % e)
    self.configspec = parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_line(self, indent_string, entry, this_entry, comment):
    """Render a single ``key = value`` line for the write method."""
    # self._quote also takes care of stringifying non-string values
    if self.unrepr:
        rendered = repr(this_entry)
    else:
        rendered = self._decode_element(self._quote(this_entry))
    key_part = self._decode_element(self._quote(entry, multiline=False))
    return '%s%s%s%s%s' % (indent_string,
                           key_part,
                           self._a_to_u(' = '),
                           rendered,
                           self._decode_element(comment))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_marker(self, indent_string, depth, entry, comment):
    """Render a ``[section]`` marker line at the given nesting depth."""
    open_brackets = self._a_to_u('[' * depth)
    close_brackets = self._a_to_u(']' * depth)
    name = self._quote(self._decode_element(entry), multiline=False)
    return '%s%s%s%s%s' % (indent_string, open_brackets, name,
                           close_brackets, self._decode_element(comment))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_comment(self, comment):
"""Deal with a comment.""" |
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    """Return this ConfigObj to its freshly-created, empty state."""
    self.clear()
    self._initialise()
    # FIXME: '_initialise' ought to do this itself, but the ConfigObj
    # constructor (and reload) require an empty dictionary there
    self.configspec = None
    # Just to be sure ;-)
    self._original_configspec = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
    """Re-read this ConfigObj from its backing file.

    Raises ``ReloadError`` when the instance was not created from a
    filename and therefore has nothing to re-read.
    """
    if not isinstance(self.filename, string_types):
        raise ReloadError()
    filename = self.filename
    # snapshot the construction options; the configspec is restored
    # from the original object handed to the constructor
    current_options = {name: getattr(self, name)
                       for name in OPTION_DEFAULTS
                       if name != 'configspec'}
    configspec = self._original_configspec
    current_options['configspec'] = configspec
    self.clear()
    self._initialise(current_options)
    self._load(filename, configspec)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(self, check, member, missing=False):
    """Dummy check: pass *member* through unchanged, raise when missing."""
    if not missing:
        return member
    raise self.baseErrorClass()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _verify(waiveredHdul):
""" Verify that the input HDUList is for a waivered FITS file. Parameters: waiveredHdul HDUList object to be verified Returns: None Exceptions: ValueError Input HDUList is not for a waivered FITS file """ |
if len(waiveredHdul) == 2:
#
# There must be exactly 2 HDU's
#
if waiveredHdul[0].header['NAXIS'] > 0:
#
# The Primary HDU must have some data
#
if isinstance(waiveredHdul[1], fits.TableHDU):
#
# The Alternate HDU must be a TableHDU
#
if waiveredHdul[0].data.shape[0] == \
waiveredHdul[1].data.shape[0] or \
waiveredHdul[1].data.shape[0] == 1:
#
# The number of arrays in the Primary HDU must match
# the number of rows in the TableHDU. This includes
# the case where there is only a single array and row.
#
return
#
# Not a valid waivered Fits file
#
raise ValueError("Input object does not represent a valid waivered" + \
" FITS file") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convertwaiveredfits(waiveredObject, outputFileName=None, forceFileOutput=False, convertTo='multiExtension', verbose=False):
    """Convert the input waivered FITS object to the requested format.

    Parameters:
        waiveredObject   a ``fits.HDUList``, file object, or file
                         specification for a waivered FITS file
        outputFileName   output file specification (default ``None``:
                         no output file is generated)
        forceFileOutput  generate an output file even when
                         ``outputFileName`` is ``None``; the name is
                         derived from the input file name
        convertTo        target conversion type
                         (only ``'multiExtension'`` is supported)
        verbose          provide verbose output

    Returns:
        an HDUList object in the requested format

    Exceptions:
        ValueError  Conversion type is unknown
    """
    if convertTo != 'multiExtension':
        raise ValueError('Conversion type ' + convertTo + ' unknown')
    return toMultiExtensionFits(waiveredObject, outputFileName,
                                forceFileOutput, verbose)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_jd(year, month, day):
    '''Determine the Julian day for a Persian (Jalali) calendar date.'''
    # years before the epoch are offset by one (no year zero)
    base_year = 474 if year >= 0 else 473
    epbase = year - base_year
    epyear = 474 + (epbase % 2820)
    # the first seven months have 31 days, the remainder 30
    if month <= 7:
        month_days = (month - 1) * 31
    else:
        month_days = (month - 1) * 30 + 6
    return (day + month_days +
            trunc(((epyear * 682) - 110) / 2816) +
            (epyear - 1) * 365 +
            trunc(epbase / 2820) * 1029983 +
            (EPOCH - 1))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_jd(jd):
    '''Calculate a Persian (Jalali) date ``(year, month, day)`` from a Julian day.'''
    # normalise to the start of the integer Julian day
    jd = trunc(jd) + 0.5
    depoch = jd - to_jd(475, 1, 1)
    # 1029983 days per 2820-year Grand Cycle
    cycle = trunc(depoch / 1029983)
    cyear = (depoch % 1029983)
    if cyear == 1029982:
        ycycle = 2820
    else:
        aux1 = trunc(cyear / 366)
        aux2 = cyear % 366
        ycycle = trunc(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1
    year = ycycle + (2820 * cycle) + 474
    if (year <= 0):
        # no year zero: skip straight from 1 to -1
        year -= 1
    yday = (jd - to_jd(year, 1, 1)) + 1
    # the first six months (186 days) have 31 days each, the rest 30
    if yday <= 186:
        month = ceil(yday / 31)
    else:
        month = ceil((yday - 6) / 30)
    day = int(jd - to_jd(year, month, 1)) + 1
    return (year, month, day)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def teardown_global_logging():
    """Disable global logging of stdio, warnings, and exceptions.

    Undoes the corresponding setup: restores the real ``sys.stdout`` /
    ``sys.stderr`` streams, the default ``excepthook`` and the original
    ``raw_input``/``input`` builtin, and stops routing warnings through
    the logging system.  A no-op when global logging was never started.
    """
    global global_logging_started
    if not global_logging_started:
        return
    stdout_logger = logging.getLogger(__name__ + '.stdout')
    stderr_logger = logging.getLogger(__name__ + '.stderr')
    # NOTE(review): comparing sys.stdout against a Logger object --
    # presumably the setup code installed these loggers (with a ``stream``
    # attribute) directly as sys.stdout/sys.stderr; confirm.
    if sys.stdout is stdout_logger:
        sys.stdout = sys.stdout.stream
    if sys.stderr is stderr_logger:
        sys.stderr = sys.stderr.stream
    # If we still have an unhandled exception go ahead and handle it with the
    # replacement excepthook before deleting it
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if exc_type is not None:
        sys.excepthook(exc_type, exc_value, exc_traceback)
    # drop the local references so the traceback can be collected
    del exc_type
    del exc_value
    del exc_traceback
    if not PY3K:
        sys.exc_clear()
    # removing the instance attribute re-exposes the interpreter default
    del sys.excepthook
    logging.captureWarnings(False)
    rawinput = 'input' if PY3K else 'raw_input'
    if hasattr(builtins, '_original_raw_input'):
        setattr(builtins, rawinput, builtins._original_raw_input)
        del builtins._original_raw_input
    global_logging_started = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None, stream=None, level=logging.INFO, filename=None, filemode='w', filelevel=None, propagate=True):
    """Do basic configuration for the logging system.

    Similar to ``logging.basicConfig`` but the logger ``name`` is
    configurable and both a file output and a stream output can be
    created.  Returns a logger object.  The default behaviour is to
    create a logger called ``name`` with a null handler, use the
    ``"%(levelname)s: %(message)s"`` format string, and add the handler
    to the ``name`` logger.

    :param name: Logger name
    :param format: handler format string
    :param datefmt: handler date/time format specifier
    :param stream: add a StreamHandler using ``stream``
                   (None disables the stream, default=None)
    :param level: logger level (default=INFO).
    :param filename: add a FileHandler using ``filename`` (default=None)
    :param filemode: open ``filename`` with specified filemode ('w' or 'a')
    :param filelevel: logger level for file logger (default=``level``)
    :param propagate: propagate message to parent (default=True)
    :returns: logging.Logger object
    """
    # Get a logger for the specified name
    logger = logging.getLogger(name)
    logger.setLevel(level)
    fmt = logging.Formatter(format, datefmt)
    logger.propagate = propagate
    # Remove existing handlers, otherwise multiple handlers can accrue.
    # BUGFIX: iterate over a *copy* of the handler list -- removing
    # entries while iterating the live list skips every other handler.
    for hdlr in list(logger.handlers):
        logger.removeHandler(hdlr)
    # Add handlers. Add NullHandler if no file or stream output so that
    # modules don't emit a warning about no handler.
    if not (filename or stream):
        logger.addHandler(logging.NullHandler())
    if filename:
        hdlr = logging.FileHandler(filename, filemode)
        if filelevel is None:
            filelevel = level
        hdlr.setLevel(filelevel)
        hdlr.setFormatter(fmt)
        logger.addHandler(hdlr)
    if stream:
        hdlr = logging.StreamHandler(stream)
        hdlr.setLevel(level)
        hdlr.setFormatter(fmt)
        logger.addHandler(hdlr)
    return logger
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _post_login_page(self, login_url):
    """Submit the login form to the HydroQuebec website.

    Coroutine.  Returns ``True`` on success.  Raises
    ``PyHydroQuebecError`` when the POST cannot be sent or the site
    does not answer with the expected 302 redirect (which indicates
    bad credentials).
    """
    data = {"login": self.username,
            "_58_password": self.password}
    try:
        raw_res = yield from self._session.post(login_url,
                                                data=data,
                                                timeout=self._timeout,
                                                allow_redirects=False)
    except OSError:
        raise PyHydroQuebecError("Can not submit login form")
    # a successful login answers with a redirect; anything else is failure
    if raw_res.status != 302:
        raise PyHydroQuebecError("Login error: Bad HTTP status code. "
                                 "Please check your username/password.")
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_p_p_id_and_contract(self):
    """Scrape the consumption-profile portlet id and the contract list.

    Coroutine.  Returns ``(p_p_id, contracts)`` where ``contracts``
    maps contract numbers (digits only) to their profile-page URL, or
    ``None`` when no link was found.  Raises ``PyHydroQuebecError``
    when the page cannot be fetched or the portlet id is missing.
    """
    contracts = {}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get profile page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    # Search contracts: e.g. "Contrat 1234 56789" -> key "123456789"
    for node in soup.find_all('span', {"class": "contrat"}):
        rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
        if rematch is not None:
            contracts[rematch.group(1).replace(" ", "")] = None
    # search for the per-contract links and attach them to the numbers
    for node in soup.find_all('a', {"class": "big iconLink"}):
        for contract in contracts:
            if contract in node.attrs.get('href'):
                contracts[contract] = node.attrs.get('href')
    # Looking for p_p_id: the portlet id, minus its leading two chars
    p_p_id = None
    for node in soup.find_all('span'):
        node_id = node.attrs.get('id', "")
        if node_id.startswith("p_portraitConsommation_WAR"):
            p_p_id = node_id[2:]
            break
    if p_p_id is None:
        raise PyHydroQuebecError("Could not get p_p_id")
    return p_p_id, contracts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_lonely_contract(self):
    """Get the contract number for a single-contract account.

    Coroutine.  Returns a dict with one contract number mapped to
    ``None`` (there is no per-contract URL in this case).  Raises
    ``PyHydroQuebecError`` when the page or the contract cannot be
    found.
    """
    contracts = {}
    try:
        raw_res = yield from self._session.get(MAIN_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get main page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    info_node = soup.find("div", {"class": "span3 contrat"})
    if info_node is None:
        raise PyHydroQuebecError("Can not found contract")
    # e.g. "Contrat 1234 56789" -> key "123456789"
    research = re.search("Contrat ([0-9]{4} [0-9]{5})", info_node.text)
    if research is not None:
        contracts[research.group(1).replace(" ", "")] = None
    if contracts == {}:
        raise PyHydroQuebecError("Can not found contract")
    return contracts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_balances(self):
    """Get all account balances as floats.

    Coroutine.  Raises ``PyHydroQuebecError`` when the page or any
    balance cannot be found.

    .. todo::
        IT SEEMS balances are shown (MAIN_URL) in the same order as
        contracts in the profile page (PROFILE_URL).  Maybe we should
        ensure that.
    """
    balances = []
    try:
        raw_res = yield from self._session.get(MAIN_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get main page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    solde_nodes = soup.find_all("div", {"class": "solde-compte"})
    if solde_nodes == []:
        raise PyHydroQuebecError("Can not found balance")
    for solde_node in solde_nodes:
        try:
            balance = solde_node.find("p").text
        except AttributeError:
            raise PyHydroQuebecError("Can not found balance")
        # drop the two-character currency suffix, use '.' as the decimal
        # separator and strip non-breaking spaces used as group separators
        balances.append(float(balance[:-2]
                              .replace(",", ".")
                              .replace("\xa0", "")))
    return balances
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_contract_page(self, contract_url):
    """Load the profile page of one contract (multi-contract accounts).

    Coroutine.  The response body is discarded; the request only
    switches the server-side session to the given contract.  Raises
    ``PyHydroQuebecError`` on network failure.
    """
    try:
        yield from self._session.get(contract_url,
                                     timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get profile page for a "
                                 "specific contract")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_annual_data(self, p_p_id):
    """Get annual consumption data for the current contract.

    Coroutine.  Returns the ``'courant'`` (current-year) entry of the
    first result.  Raises ``PyHydroQuebecAnnualError`` on any failure —
    callers treat that as "no annual data yet" (e.g. a contract younger
    than one year).
    """
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecAnnualError("Can not get annual data")
    try:
        # the portal serves JSON with a text/json content type
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not json_output.get('success'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not json_output.get('results'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if 'courant' not in json_output.get('results')[0]:
        raise PyHydroQuebecAnnualError("Could not get annual data")
    return json_output.get('results')[0]['courant']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_monthly_data(self, p_p_id):
    """Get monthly (billing-period) consumption data.

    Coroutine.  Returns the ``'results'`` list from the portal's JSON
    answer.  Raises ``PyHydroQuebecError`` on network, decoding or
    portal failure.
    """
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_resource_id": ("resourceObtenirDonnees"
                                  "PeriodesConsommation")}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get monthly data")
    try:
        # the portal serves JSON with a text/json content type
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecError("Could not get monthly data")
    if not json_output.get('success'):
        raise PyHydroQuebecError("Could not get monthly data")
    return json_output.get('results')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_hourly_data(self, day_date, p_p_id):
    """Get hourly consumption, power and weather data for one day.

    Coroutine.  *day_date* is a ``YYYY-MM-DD`` string.  Returns a dict
    with two keys: ``'processed_hourly_data'`` (one entry per hour with
    the temperature merged in) and ``'raw_hourly_data'`` (the Energy,
    Power and Weather lists as returned by the portal).
    """
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "date": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get hourly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        # NOTE(review): raising PyHydroQuebecAnnualError from the hourly
        # fetch looks like a copy/paste slip -- confirm whether
        # PyHydroQuebecError was intended here (and below).
        raise PyHydroQuebecAnnualError("Could not get hourly data")
    hourly_consumption_data = json_output['results']['listeDonneesConsoEnergieHoraire']
    hourly_power_data = json_output['results']['listeDonneesConsoPuissanceHoraire']
    # second request: hourly temperatures for the same day
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesMeteoHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "dateDebut": day_date,
              "dateFin": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get hourly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get hourly data")
    hourly_weather_data = []
    if not json_output.get('results'):
        # Missing Temperature data from Hydro-Quebec (but don't crash the app for that)
        hourly_weather_data = [None]*24
    else:
        hourly_weather_data = json_output['results'][0]['listeTemperaturesHeure']
    # Add temp in data: merge the matching temperature into each hour
    processed_hourly_data = [{'hour': data['heure'],
                              'lower': data['consoReg'],
                              'high': data['consoHaut'],
                              'total': data['consoTotal'],
                              'temp': hourly_weather_data[i]}
                             for i, data in enumerate(hourly_consumption_data)]
    raw_hourly_data = {'Energy': hourly_consumption_data,
                       'Power': hourly_power_data,
                       'Weather': hourly_weather_data}
    hourly_data = {'processed_hourly_data': processed_hourly_data,
                   'raw_hourly_data': raw_hourly_data}
    return hourly_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_data_detailled_energy_use(self, start_date=None, end_date=None):
    """Collect detailed (hourly) energy use for every contract.

    Coroutine.  Fetches the raw hourly data for each day in
    ``[start_date, end_date)`` (defaulting to yesterday..today) and
    stores it on ``self._data`` keyed by contract number and then by
    ``YYYY-MM-DD`` date.
    """
    if start_date is None:
        start_date = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
    if end_date is None:
        end_date = datetime.datetime.now(HQ_TIMEZONE)
    # Get http session
    yield from self._get_httpsession()
    # Get login page
    login_url = yield from self._get_login_page()
    # Post login page
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # An empty mapping means the account has a single contract; fetch it
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    # For all contracts
    for contract, contract_url in contracts.items():
        if contract_url:
            # switch the server-side session to this contract
            yield from self._load_contract_page(contract_url)
        data = {}
        # one entry per day in [start_date, end_date)
        dates = [(start_date + datetime.timedelta(n))
                 for n in range(int((end_date - start_date).days))]
        for date in dates:
            # Get Hourly data
            day_date = date.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            data[day_date] = hourly_data['raw_hourly_data']
        # Add contract
        self._data[contract] = data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_data(self):
    """Get the latest data from HydroQuebec for every contract.

    Coroutine.  Logs in, then for each contract collects the balance,
    yesterday's hourly data (best effort), the annual and monthly
    summaries, and the daily data for the current billing period, and
    stores the combined dict on ``self._data`` keyed by contract
    number.
    """
    # Get http session
    yield from self._get_httpsession()
    # Get login page
    login_url = yield from self._get_login_page()
    # Post login page
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # An empty mapping means the account has a single contract; fetch it
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    # Get balance (assumed to be listed in the same order as contracts)
    balances = yield from self._get_balances()
    balances_len = len(balances)
    balance_id = 0
    # For all contracts
    for contract, contract_url in contracts.items():
        if contract_url:
            # switch the server-side session to this contract
            yield from self._load_contract_page(contract_url)
        # Get Hourly data (best effort: missing data must not abort)
        try:
            yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
            day_date = yesterday.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            hourly_data = hourly_data['processed_hourly_data']
        except Exception:  # pylint: disable=W0703
            # We don't have hourly data for some reason
            hourly_data = {}
        # Get Annual data
        try:
            annual_data = yield from self._get_annual_data(p_p_id)
        except PyHydroQuebecAnnualError:
            # We don't have annual data, which is possible if your
            # contract is younger than 1 year
            annual_data = {}
        # Get Monthly data
        monthly_data = yield from self._get_monthly_data(p_p_id)
        monthly_data = monthly_data[0]
        # Get daily data
        start_date = monthly_data.get('dateDebutPeriode')
        end_date = monthly_data.get('dateFinPeriode')
        try:
            daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date)
        except Exception:  # pylint: disable=W0703
            daily_data = []
        # We have to test daily_data because it's empty
        # At the end/starts of a period
        if daily_data:
            daily_data = daily_data[0]['courant']
        # format data: flatten the portal keys through the *_MAP tables
        contract_data = {"balance": balances[balance_id]}
        for key1, key2 in MONTHLY_MAP:
            contract_data[key1] = monthly_data[key2]
        for key1, key2 in ANNUAL_MAP:
            contract_data[key1] = annual_data.get(key2, "")
        # We have to test daily_data because it's empty
        # At the end/starts of a period
        if daily_data:
            for key1, key2 in DAILY_MAP:
                contract_data[key1] = daily_data[key2]
        # Hourly
        if hourly_data:
            contract_data['yesterday_hourly_consumption'] = hourly_data
        # Add contract
        self._data[contract] = contract_data
        # advance to the next balance, clamping at the last one when
        # there are more contracts than balances
        balance_count = balance_id + 1
        if balance_count < balances_len:
            balance_id += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data(self, contract=None):
"""Return collected data.""" |
if contract is None:
return self._data
if contract in self._data.keys():
return {contract: self._data[contract]}
raise PyHydroQuebecError("Contract {} not found".format(contract)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_argument(self, arg):
    """Validate a type or matcher argument to the constructor."""
    if arg is None:
        return arg
    # A bare type is shorthand for an InstanceOf matcher on that type.
    if isinstance(arg, type):
        return InstanceOf(arg)
    if isinstance(arg, BaseMatcher):
        return arg
    raise TypeError(
        "argument of %s can be a type or a matcher (got %r)" % (
            self.__class__.__name__, type(arg)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _initialize(self, *args, **kwargs):
"""Initiaize the mapping matcher with constructor arguments.""" |
self.items = None
self.keys = None
self.values = None
if args:
if len(args) != 2:
raise TypeError("expected exactly two positional arguments, "
"got %s" % len(args))
if kwargs:
raise TypeError(
"expected positional or keyword arguments, not both")
# got positional arguments only
self.keys, self.values = map(self._validate_argument, args)
elif kwargs:
has_kv = 'keys' in kwargs and 'values' in kwargs
has_of = 'of' in kwargs
if not (has_kv or has_of):
raise TypeError("expected keys/values or items matchers, "
"but got: %s" % list(kwargs.keys()))
if has_kv and has_of:
raise TypeError(
"expected keys & values, or items matchers, not both")
if has_kv:
# got keys= and values= matchers
self.keys = self._validate_argument(kwargs['keys'])
self.values = self._validate_argument(kwargs['values'])
else:
# got of= matcher, which can be a tuple of matchers,
# or a single matcher for dictionary items
of = kwargs['of']
if isinstance(of, tuple):
try:
# got of= as tuple of matchers
self.keys, self.values = \
map(self._validate_argument, of)
except ValueError:
raise TypeError(
"of= tuple has to be a pair of matchers/types" % (
self.__class__.__name__,))
else:
# got of= as a single matcher
self.items = self._validate_argument(of) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docs(ctx, output='html', rebuild=False, show=True, verbose=True):
    """Build the docs and show them in default web browser."""
    build_cmd = 'sphinx-build -b {output} {all} {verbose} docs docs/_build'.format(
        output=output,
        all='-a -E' if rebuild else '',
        verbose='-v' if verbose else '')
    build_result = ctx.run(build_cmd)
    if not build_result.ok:
        fatal("Failed to build the docs", cause=build_result)

    if not show:
        return
    index_path = os.path.join(DOCS_OUTPUT_DIR, 'index.html')
    if sys.platform == 'darwin':
        # macOS needs an explicit file:// URL for the browser to open it
        index_path = 'file://%s' % os.path.abspath(index_path)
    webbrowser.open_new_tab(index_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload(ctx, yes=False):
    """Upload the package to PyPI."""
    import callee
    version = callee.__version__

    # check the packages version
    # TODO: add a 'release' to automatically bless a version as release one
    if version.endswith('-dev'):
        fatal("Can't upload a development version (%s) to PyPI!", version)

    # confirm with the user unless -y / yes=True was given up front
    if not yes:
        answer = input("Do you really want to upload to PyPI [y/N]? ")
        yes = answer.strip().lower() == 'y'
        if not yes:
            logging.warning("Aborted -- not uploading to PyPI.")
            return -2

    logging.debug("Uploading version %s to PyPI...", version)
    sdist_upload = ctx.run('python setup.py sdist upload')
    if not sdist_upload.ok:
        fatal("Failed to upload version %s to PyPI!", version,
              cause=sdist_upload)
    logging.info("PyPI upload completed successfully.")

    # tag the released version in Git and push everything upstream
    tag_result = ctx.run('git tag %s' % version)
    if not tag_result.ok:
        fatal("Failed to add a Git tag for uploaded version %s", version,
              cause=tag_result)
    push_result = ctx.run('git push && git push --tags')
    if not push_result.ok:
        fatal("Failed to push the release upstream.", cause=push_result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fatal(*args, **kwargs):
    """Log an error message and exit.

    Positional arguments are forwarded to ``logging.error``.
    Following arguments are keyword-only.

    :param exitcode: Optional exit code to use
    :param cause: Optional Invoke's Result object, i.e.
                  result of a subprocess invocation
    """
    # determine the exitcode to return to the operating system
    exitcode = kwargs.pop('exitcode', None)
    if 'cause' in kwargs:
        cause = kwargs.pop('cause')
        if not isinstance(cause, Result):
            raise TypeError(
                "invalid cause of fatal error: expected %r, got %r" % (
                    Result, type(cause)))
        # fall back to the failed subprocess's return code
        exitcode = exitcode or cause.return_code

    logging.error(*args, **kwargs)
    raise Exit(exitcode or -1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_request_parameters(func):
"""Adds the ratelimit and request timeout parameters to a function.""" |
# The function the decorator returns
async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs):
return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout, **kwargs)
# We return the decorated func
return decorated_func |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _base_request(self, battle_tag: str, endpoint_name: str, session: aiohttp.ClientSession, *, platform=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
    """Perform a GET request against a profile endpoint, handling ratelimits.

    :param battle_tag: player battletag; sanitized via ``self.sanitize_battletag``.
    :param endpoint_name: endpoint path appended after the sanitized battletag.
    :param session: aiohttp session used to execute the request.
    :param platform: platform to search on; defaults to ``self.default_platform``.
    :param handle_ratelimit: whether to sleep and retry on HTTP 429;
        defaults to ``self.default_handle_ratelimit``.
    :param max_tries: maximum request attempts; defaults to ``self.default_max_tries``.
    :param request_timeout: per-request timeout in seconds;
        defaults to ``self.default_request_timeout``.
    :return: decoded JSON response body.
    :raises RatelimitError: if still ratelimited after ``max_tries`` attempts.
    :raises ProfileNotFoundError: on HTTP 404 with a "profile not found" body.
    :raises ConnectionError: on any other non-200 status.
    """
    # We check the different optional arguments, and if they're not passed (are none) we set them to the default for the client object
    if platform is None:
        platform = self.default_platform
    if handle_ratelimit is None:
        handle_ratelimit = self.default_handle_ratelimit
    if max_tries is None:
        max_tries = self.default_max_tries
    if request_timeout is None:
        request_timeout = self.default_request_timeout
    # The battletag with #s removed
    san_battle_tag = self.sanitize_battletag(battle_tag)
    # The ratelimit logic: retry up to max_tries times, sleeping between attempts
    for _ in range(max_tries):
        # We execute a request
        try:
            resp_json, status = await self._async_get(
                session,
                self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format(
                    battle_tag=san_battle_tag,
                    endpoint=endpoint_name
                ),
                params={"platform": platform},
                # Passed to _async_get and indicates what platform we're searching on
                headers={"User-Agent": "overwatch_python_api"},
                # According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do
                _async_timeout_seconds=request_timeout
            )
            if status == 429 and resp_json["msg"] == "you are being ratelimited":
                raise RatelimitError
        except RatelimitError as e:
            # NOTE(review): only RatelimitError is actually caught here; a
            # timeout raised inside _async_get would propagate -- confirm
            # whether _async_get converts timeouts to RatelimitError.
            # We are ratelimited, so we check if we handle ratelimiting logic
            # If so, we wait and then execute the next iteration of the loop
            if handle_ratelimit:
                # We wait to remedy ratelimiting, and we wait a bit more than the response says we should
                await asyncio.sleep(resp_json["retry"] + 1)
                continue
            else:
                raise
        else:
            # We didn't get an error, so we exit the loop because it was a successful request
            break
    else:
        # The loop didn't stop because it got breaked, which means that we got ratelimited until the maximum number of tries were finished
        raise RatelimitError("Got ratelimited for each requests until the maximum number of retries were reached.")
    # Validate the response
    if status != 200:
        if status == 404 and resp_json["msg"] == "profile not found":
            raise ProfileNotFoundError(
                "Got HTTP 404, profile not found. This is caused by the given battletag not existing on the specified platform.")
        if status == 429 and resp_json["msg"] == "you are being ratelimited":
            raise RatelimitError(
                "Got HTTP 429, you are being ratelimited. This is caused by calls to the api too frequently.")
        raise ConnectionError("Did not get HTTP status 200, got: {0}".format(status))
    return resp_json
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_method(arg, min_arity=None, max_arity=None):
    """Check if argument is a method.

    Optionally, we can also check if minimum or maximum arities
    (number of accepted arguments) match given minimum and/or maximum.
    """
    if not callable(arg):
        return False
    looks_like_method = (inspect.ismethod(arg)
                         or inspect.ismethoddescriptor(arg)
                         or inspect.isbuiltin(arg))
    if not looks_like_method:
        return False

    try:
        argnames, varargs, kwargs, defaults = getargspec(arg)
    except TypeError:
        # On CPython 2.x, built-in methods of file aren't inspectable,
        # so if it's file.read() or file.write(), we can't tell it for sure.
        # Given how this check is being used, assuming the best is probably
        # all we can do here.
        return True

    # drop the implicit receiver before counting arguments
    if argnames and argnames[0] == 'self':
        argnames = argnames[1:]

    if min_arity is not None:
        actual_min = len(argnames) - len(defaults or ())
        assert actual_min >= 0, (
            "Minimum arity of %r found to be negative (got %s)!" % (
                arg, actual_min))
        if int(min_arity) != actual_min:
            return False

    if max_arity is not None:
        # *args / **kwargs make the method accept effectively any number
        actual_max = sys.maxsize if varargs or kwargs else len(argnames)
        if int(max_arity) != actual_max:
            return False

    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_readable(self, obj):
    """Check if the argument is a readable file-like object."""
    try:
        read = obj.read
    except AttributeError:
        # no .read attribute at all -> definitely not file-like
        return False
    return is_method(read, max_arity=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_writable(self, obj):
    """Check if the argument is a writable file-like object."""
    try:
        write = obj.write
    except AttributeError:
        # no .write attribute at all -> definitely not file-like
        return False
    return is_method(write, min_arity=1, max_arity=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(time: datetime, altkm: float, glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], *, f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """Dispatch to the 1-D profile or the lat/lon-grid loop.

    Loops the rungtd1d function below. Figure it's easier to troubleshoot
    in Python than Fortran.
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)  # has to be here
    # single point + single time -> one altitude profile, otherwise grid loop
    single_location = glat.size == 1 and glon.size == 1
    single_time = isinstance(time, (str, date, datetime, np.datetime64))
    if single_location and single_time:
        return rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()],
                        f107a=f107a, f107=f107, Ap=Ap)
    return loopalt_gtd(time, glat, glon, altkm,
                       f107a=f107a, f107=f107, Ap=Ap)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loopalt_gtd(time: datetime, glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], altkm: Union[float, List[float], np.ndarray], *, f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """Loop over location and time, computing one profile per grid point.

    time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime
    glat: float or 2-D np.ndarray
    glon: float or 2-D np.ndarray
    altkm: float or list or 1-D np.ndarray
    f107a / f107 / Ap: optional space-weather indices forwarded to rungtd1d

    Returns an xarray.Dataset built by merging every per-point result.
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)
    assert glat.ndim == glon.ndim == 2
    times = np.atleast_1d(time)
    assert times.ndim == 1
    atmos = xarray.Dataset()
    for k, t in enumerate(times):
        print('computing', t)
        for i in range(glat.shape[0]):
            for j in range(glat.shape[1]):
                # atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])),
                #                        data_vars='minimal',coords='minimal',dim='lon')
                atm = rungtd1d(t, altkm, glat[i, j], glon[i, j],
                               f107a=f107a, f107=f107, Ap=Ap)
                # NOTE(review): merge presumes each per-point dataset carries
                # distinct coordinates; confirm identical coords don't collide.
                atmos = xarray.merge((atmos, atm))
    # attrs are taken from whichever profile was computed last
    atmos.attrs = atm.attrs
    return atmos
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_desc(self, desc):
    """Validate the predicate description."""
    if desc is None:
        return desc
    if not isinstance(desc, STRING_TYPES):
        raise TypeError(
            "predicate description for Matching must be a string, "
            "got %r" % (type(desc),))

    # Python 2 mandates __repr__ to be an ASCII string,
    # so if Unicode is passed (usually due to unicode_literals),
    # it should be ASCII-encodable.
    if not IS_PY3 and isinstance(desc, unicode):
        try:
            return desc.encode('ascii', errors='strict')
        except UnicodeEncodeError:
            raise TypeError("predicate description must be "
                            "an ASCII string in Python 2")

    return desc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_email(self):
    """Raise ValidationError if the contact exists."""
    email = self.cleaned_data['email']
    existing = self.api.lists.contacts(id=self.list_id)['result']
    # any() short-circuits on the first matching contact, like the
    # original loop did
    if any(entry['email'] == email for entry in existing):
        raise forms.ValidationError(
            _(u'This email is already subscribed'))
    return email
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_contact(self):
    """Create a contact on the list using the cleaned email address."""
    email = self.cleaned_data['email']
    self.api.lists.addcontact(
        contact=email, id=self.list_id, method='POST')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_id(self):
    """Get or create the list id (cached on the instance)."""
    cached = getattr(self, '_list_id', None)
    if cached is None:
        # look for an existing list with the configured name
        for candidate in self.api.lists.all()['lists']:
            if candidate['name'] == self.list_name:
                self._list_id = candidate['id']
        # nothing matched: create the list on the remote service
        if not getattr(self, '_list_id', None):
            self._list_id = self.api.lists.create(
                label=self.list_label, name=self.list_name,
                method='POST')['list_id']
    return self._list_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_tags(filename):
"""Reads values of "magic tags" defined in the given Python file. :param filename: Python filename to read the tags from :return: Dictionary of tags """ |
with open(filename) as f:
ast_tree = ast.parse(f.read(), filename)
res = {}
for node in ast.walk(ast_tree):
if type(node) is not ast.Assign:
continue
target = node.targets[0]
if type(target) is not ast.Name:
continue
if not (target.id.startswith('__') and target.id.endswith('__')):
continue
name = target.id[2:-2]
res[name] = ast.literal_eval(node.value)
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
    """Parse *text* and yield n-gram tuples of cleaned, lower-cased tokens.

    Tokens are assumed to be divided by any form of whitespace character.
    An n-gram is yielded only if every token in it survives the filters:
    length >= *min_length*, not in *stopwords*, and (optionally) not numeric.

    :param text: input string to tokenize
    :param stopwords: container of tokens to reject (default: module stopwords)
    :param ngrams: n-gram size; None means single tokens (1-grams)
    :param min_length: minimum token length to keep
    :param ignore_numeric: when True, reject purely numeric tokens
    """
    if ngrams is None:
        ngrams = 1
    # strip possessive "'s" before removing punctuation
    text = re.sub(re.compile('\'s'), '', text)  # Simple heuristic
    text = re.sub(_re_punctuation, '', text)
    matched_tokens = re.findall(_re_token, text.lower())
    for tokens in get_ngrams(matched_tokens, ngrams):
        for i in range(len(tokens)):
            tokens[i] = tokens[i].strip(punctuation)
            # break rejects the whole n-gram; the for/else below only
            # yields when no token was rejected
            if len(tokens[i]) < min_length or tokens[i] in stopwords:
                break
            if ignore_numeric and isnumeric(tokens[i]):
                break
        else:
            yield tuple(tokens)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cmake_setup():
    """Attempt to configure and build with CMake >= 3.

    :raises FileNotFoundError: when no ``cmake`` executable is on PATH.
    """
    cmake_exe = shutil.which('cmake')
    if not cmake_exe:
        raise FileNotFoundError('CMake not available')
    # On Windows the MinGW Makefiles generator refuses to run when sh.exe is
    # on PATH; forcing CMAKE_SH to NOTFOUND is the conventional workaround.
    # BUG FIX: the original flag had an unbalanced quote
    # ('-DCMAKE_SH="CMAKE_SH-NOTFOUND'), passing a literal '"' to CMake.
    wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH=CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []
    subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)],
                          cwd=BINDIR)

    ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
                         stderr=subprocess.PIPE,
                         universal_newlines=True)
    result(ret)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meson_setup():
    """Attempt to build with Meson + Ninja."""
    meson_exe = shutil.which('meson')
    ninja_exe = shutil.which('ninja')
    if meson_exe is None or ninja_exe is None:
        raise FileNotFoundError('Meson or Ninja not available')

    # configure only once: Meson refuses to reconfigure an existing build dir
    ninja_file = BINDIR / 'build.ninja'
    if not ninja_file.is_file():
        subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR)

    ret = subprocess.run(ninja_exe, cwd=BINDIR, stderr=subprocess.PIPE,
                         universal_newlines=True)
    result(ret)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_term_occurrence(self, term, document):
    """Adds an occurrence of the term in the specified document."""
    # make sure the document is tracked (with zero length) before counting
    self._documents.setdefault(document, 0)

    if term not in self._terms:
        # a frozen index silently ignores unseen terms
        if self._freeze:
            return
        self._terms[term] = collections.Counter()
    self._terms[term].setdefault(document, 0)

    self._documents[document] += 1
    self._terms[term][document] += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_total_term_frequency(self, term):
    """Gets the frequency of the specified term in the entire corpus
    added to the HashedIndex."""
    try:
        per_document = self._terms[term]
    except KeyError:
        raise IndexError(TERM_DOES_NOT_EXIST)
    return sum(per_document.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_term_frequency(self, term, document, normalized=False):
    """Returns the frequency of the term specified in the document,
    optionally normalized by the document length."""
    if document not in self._documents:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    count = self._terms[term].get(document, 0)
    if normalized:
        count /= self.get_document_length(document)
    return float(count)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_document_frequency(self, term):
    """Returns the number of documents the specified term appears in."""
    if term in self._terms:
        return len(self._terms[term])
    raise IndexError(TERM_DOES_NOT_EXIST)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_document_length(self, document):
    """Returns the number of terms found within the specified document."""
    try:
        return self._documents[document]
    except KeyError:
        # preserve the original IndexError contract for unknown documents
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_documents(self, term):
""" Returns all documents related to the specified term in the form of a Counter object. """ |
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
else:
return self._terms[term] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tfidf(self, term, document, normalized=False):
    """Returns the Term-Frequency Inverse-Document-Frequency value for the
    given term in the specified document. If normalized is True, term
    frequency will be divided by the document length."""
    tf = self.get_term_frequency(term, document)
    # guard clause avoids the extra df/idf work when the term is absent
    if tf == 0.0:
        return 0.0

    # Laplacian correction: +1 on document frequency (and +2 on corpus
    # size) prevents division by zero
    df = 1 + self.get_document_frequency(term)
    n = 2 + len(self._documents)
    if normalized:
        tf /= self.get_document_length(document)
    return tf * math.log10(n / df)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_feature_matrix(self, mode='tfidf'):
    """Returns a feature matrix (list of lists) for the terms and documents
    in this Inverted Index, using tf-idf weighting by default.

    The term counts in each document can alternatively be used by passing
    mode='count', or any other mode understood by generate_document_vector.
    The matrix is m x n (m documents, n terms) and converts directly to a
    numpy matrix via np.as_matrix if required.
    """
    return [self.generate_document_vector(doc, mode)
            for doc in self._documents]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_class_in_list(klass, lst):
    """Return the first element of *lst* whose class is exactly *klass*.

    Returns None if no such instance is present. Note the match is on the
    exact class (``x.__class__ == klass``), not ``isinstance``, so subclass
    instances do not match.
    """
    # next() over a generator short-circuits on the first hit instead of
    # filtering the whole list into a throwaway copy.
    return next((item for item in lst if item.__class__ == klass), None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_parmlist(self, parameters):
""" Converts a dictionary of name and value pairs into a PARMLIST string value acceptable to the Payflow Pro API. """ |
args = []
for key, value in parameters.items():
if not value is None:
# We always use the explicit-length keyname format, to reduce the chance
# of requests failing due to unusual characters in parameter values.
try:
classinfo = unicode
except NameError:
classinfo = str
if isinstance(value, classinfo):
key = '%s[%d]' % (key.upper(), len(value.encode('utf-8')))
else:
key = '%s[%d]' % (key.upper(), len(str(value)))
args.append('%s=%s' % (key, value))
args.sort()
parmlist = '&'.join(args)
return parmlist |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.