Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 class: entailment)
def writeOutput(self, filename, samples, srcFs, targetFs): """ Resamples the signal to the targetFs and writes it to filename. :param filename: the filename. :param samples: the signal to resample. :param srcFs: the source sample rate. :param targetFs: the target sample rate. :return: None """ import librosa inputLength = samples.shape[-1] if srcFs != targetFs: if inputLength < targetFs: logger.info("Input signal is too short (" + str(inputLength) + " samples) for resampling to " + str(targetFs) + "Hz") outputSamples = samples targetFs = srcFs else: logger.info("Resampling " + str(inputLength) + " samples from " + str(srcFs) + "Hz to " + str(targetFs) + "Hz") outputSamples = librosa.resample(samples, srcFs, targetFs, res_type='kaiser_fast') else: outputSamples = samples logger.info("Writing output to " + filename) maxv = np.iinfo(np.int32).max librosa.output.write_wav(filename, (outputSamples * maxv).astype(np.int32), targetFs) logger.info("Output written to " + filename)
Resamples the signal to the targetFs and writes it to filename. :param filename: the filename. :param samples: the signal to resample. :param srcFs: the source sample rate. :param targetFs: the target sample rate. :return: None
entailment
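A minimal sketch of the int32 scaling step that writeOutput performs before writing, using only numpy; the sine tone and rates are hypothetical stand-ins for a real signal:

import numpy as np

srcFs = 48000
# one second of a 440Hz sine in float [-1, 1], the shape librosa loads audio in
samples = np.sin(2 * np.pi * 440 * np.arange(srcFs) / srcFs).astype(np.float32)
# scale to the full int32 range before writing, exactly as writeOutput does
maxv = np.iinfo(np.int32).max
ints = (samples * maxv).astype(np.int32)
print(ints.dtype, ints.min(), ints.max())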
def delete(self, name): """ Deletes the named entry. :param name: the entry. :return: the deleted entry. """ i, entry = next(((i, x) for i, x in enumerate(self._uploadCache) if x['name'] == name), (None, None)) if entry is not None: logger.info("Deleting " + name) os.remove(str(entry['path'])) del self._uploadCache[i] return entry else: logger.info("Unable to delete " + name + ", not found") return None
Deletes the named entry. :param name: the entry. :return: the deleted entry.
entailment
def eparOptionFactory(master, statusBar, param, defaultParam, doScroll, fieldWidths, plugIn=None, editedCallbackObj=None, helpCallbackObj=None, mainGuiObj=None, defaultsVerb="Default", bg=None, indent=False, flagging=False, flaggedColor=None): """Return EparOption item of appropriate type for the parameter param""" # Allow passed-in overrides if plugIn is not None: eparOption = plugIn # If there is an enumerated list, regardless of datatype use EnumEparOption elif param.choice is not None: eparOption = EnumEparOption else: # Use String for types not in the dictionary eparOption = _eparOptionDict.get(param.type, StringEparOption) # Create it eo = eparOption(master, statusBar, param, defaultParam, doScroll, fieldWidths, defaultsVerb, bg, indent=indent, helpCallbackObj=helpCallbackObj, mainGuiObj=mainGuiObj) eo.setEditedCallbackObj(editedCallbackObj) eo.setIsFlagging(flagging, False) if flaggedColor: eo.setFlaggedColor(flaggedColor) return eo
Return EparOption item of appropriate type for the parameter param
entailment
def extraBindingsForSelectableText(self): """ Collect in 1 place the bindings needed for watchTextSelection() """ # See notes in watchTextSelection self.entry.bind('<FocusIn>', self.watchTextSelection, "+") self.entry.bind('<ButtonRelease-1>', self.watchTextSelection, "+") self.entry.bind('<B1-Motion>', self.watchTextSelection, "+") self.entry.bind('<Shift_L>', self.watchTextSelection, "+") self.entry.bind('<Left>', self.watchTextSelection, "+") self.entry.bind('<Right>', self.watchTextSelection, "+")
Collect in 1 place the bindings needed for watchTextSelection()
entailment
def focusOut(self, event=None): """Clear selection (if text is selected in this widget)""" # do nothing if this isn't a text-enabled widget if not self.isSelectable: return if self.entryCheck(event) is None: # Entry value is OK # Save the last selection so it can be restored if we # come right back to this widget. Then clear the selection # before moving on. entry = self.entry try: if not entry.selection_present(): self.lastSelection = None else: self.lastSelection = (entry.index(SEL_FIRST), entry.index(SEL_LAST)) except AttributeError: pass if USING_X and sys.platform == 'darwin': pass # do nothing here - we need it left selected for cut/paste else: entry.selection_clear() else: return "break"
Clear selection (if text is selected in this widget)
entailment
def watchTextSelection(self, event=None): """ Callback used to see if there is a new text selection. In certain cases we manually add the text to the clipboard (though on most platforms the correct behavior happens automatically). """ # Note that this isn't perfect - it is a key click behind when # selections are made via shift-arrow. If this becomes important, it # can likely be fixed with after(). if self.entry.selection_present(): # entry must be text entry type i1 = self.entry.index(SEL_FIRST) i2 = self.entry.index(SEL_LAST) if i1 >= 0 and i2 >= 0 and i2 > i1: sel = self.entry.get()[i1:i2] # Add to clipboard on platforms where necessary. print('selected: "'+sel+'"')
Callback used to see if there is a new text selection. In certain cases we manually add the text to the clipboard (though on most platforms the correct behavior happens automatically).
entailment
def focusIn(self, event=None): """Select all text (if applicable) on taking focus""" try: # doScroll returns false if the call was ignored because the # last call also came from this widget. That avoids unwanted # scrolls and text selection when the focus moves in and out # of the window. if self.doScroll(event): self.entry.selection_range(0, END) # select all text in widget else: # restore selection to what it was on the last FocusOut if self.lastSelection: self.entry.selection_range(*self.lastSelection) except AttributeError: pass
Select all text (if applicable) on taking focus
entailment
def widgetEdited(self, event=None, val=None, action='entry', skipDups=True): """ A general method for firing any applicable triggers when a value has been set. This is meant to be easily callable from any part of this class (or its subclasses), so that it can be called as soon as need be (immed. on click?). This is smart enough to be called multiple times, itself handling the removal of any/all duplicate successive calls (unless skipDups is False). If val is None, it will use the GUI entry's current value via choice.get(). See teal.py for a description of action. """ # be as lightweight as possible if obj doesn't care about this stuff if not self._editedCallbackObj and not self._flagNonDefaultVals: return # get the current value curVal = val # take this first, if it is given if curVal is None: curVal = self.choice.get() # do any flagging self.flagThisPar(curVal, False) # see if this is a duplicate successive call for the same value if skipDups and curVal==self._lastWidgetEditedVal: return # pull trigger if not self._editedCallbackObj: return self._editedCallbackObj.edited(self.paramInfo.scope, self.paramInfo.name, self.previousValue, curVal, action) # for our duplicate checker self._lastWidgetEditedVal = curVal
A general method for firing any applicable triggers when a value has been set. This is meant to be easily callable from any part of this class (or its subclasses), so that it can be called as soon as need be (immed. on click?). This is smart enough to be called multiple times, itself handling the removal of any/all duplicate successive calls (unless skipDups is False). If val is None, it will use the GUI entry's current value via choice.get(). See teal.py for a description of action.
entailment
def popupChoices(self, event=None): """Popup right-click menu of special parameter operations Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled instance attributes to determine which items are available. """ # don't bother if all items are disabled if NORMAL not in (self.browserEnabled, self.clearEnabled, self.unlearnEnabled, self.helpEnabled): return self.menu = Menu(self.entry, tearoff = 0) if self.browserEnabled != DISABLED: # Handle file and directory in different functions (tkFileDialog) if capable.OF_TKFD_IN_EPAR: self.menu.add_command(label = "File Browser", state = self.browserEnabled, command = self.fileBrowser) self.menu.add_command(label = "Directory Browser", state = self.browserEnabled, command = self.dirBrowser) # Handle file and directory in the same function (filedlg) else: self.menu.add_command(label = "File/Directory Browser", state = self.browserEnabled, command = self.fileBrowser) self.menu.add_separator() self.menu.add_command(label = "Clear", state = self.clearEnabled, command = self.clearEntry) self.menu.add_command(label = self.defaultsVerb, state = self.unlearnEnabled, command = self.unlearnValue) self.menu.add_command(label = 'Help', state = self.helpEnabled, command = self.helpOnParam) # Get the current y-coordinate of the Entry ycoord = self.entry.winfo_rooty() # Get the current x-coordinate of the cursor xcoord = self.entry.winfo_pointerx() - XSHIFT # Display the Menu as a popup as it is not associated with a Button self.menu.tk_popup(xcoord, ycoord)
Popup right-click menu of special parameter operations Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled instance attributes to determine which items are available.
entailment
def fileBrowser(self): """Invoke a tkinter file dialog""" if capable.OF_TKFD_IN_EPAR: fname = askopenfilename(parent=self.entry, title="Select File") else: from . import filedlg self.fd = filedlg.PersistLoadFileDialog(self.entry, "Select File", "*") if self.fd.Show() != 1: self.fd.DialogCleanup() return fname = self.fd.GetFileName() self.fd.DialogCleanup() if not fname: return # canceled self.choice.set(fname) # don't select when we go back to widget to reduce risk of # accidentally typing over the filename self.lastSelection = None
Invoke a tkinter file dialog
entailment
def dirBrowser(self): """Invoke a tkinter directory dialog""" if capable.OF_TKFD_IN_EPAR: fname = askdirectory(parent=self.entry, title="Select Directory") else: raise NotImplementedError('Fix popupChoices() logic.') if not fname: return # canceled self.choice.set(fname) # don't select when we go back to widget to reduce risk of # accidentally typing over the filename self.lastSelection = None
Invoke a tkinter directory dialog
entailment
def forceValue(self, newVal, noteEdited=False): """Force-set a parameter entry to the given value""" if newVal is None: newVal = "" self.choice.set(newVal) if noteEdited: self.widgetEdited(val=newVal, skipDups=False)
Force-set a parameter entry to the given value
entailment
def unlearnValue(self): """Unlearn a parameter value by setting it back to its default""" defaultValue = self.defaultParamInfo.get(field = "p_filename", native = 0, prompt = 0) self.choice.set(defaultValue)
Unlearn a parameter value by setting it back to its default
entailment
def setActiveState(self, active): """ Use this to enable or disable (grey out) a parameter. """ st = DISABLED if active: st = NORMAL self.entry.configure(state=st) self.inputLabel.configure(state=st) self.promptLabel.configure(state=st)
Use this to enable or disable (grey out) a parameter.
entailment
def flagThisPar(self, currentVal, force): """ If this par's value is different from the default value, it is here that we flag it somehow as such. This basic version simply makes the surrounding text red (or returns it to normal). May be overridden. Leave force at False if you want to allow this method to make smart time-saving decisions about when it can skip recoloring because it is already the right color. Set force to True if you think we got out of sync and need to be fixed. """ # Get out ASAP if we can if (not force) and (not self._flagNonDefaultVals): return # handle simple case before comparing values (quick return) if force and not self._flagNonDefaultVals: self._flagged = False self.promptLabel.configure(fg="black") return # Get/format values to compare currentNative = self.convertToNative(currentVal) defaultNative = self.convertToNative(self.defaultParamInfo.value) # par.value is same as par.get(native=1,prompt=0) # flag or unflag as needed if currentNative != defaultNative: if not self._flagged or force: self._flagged = True self.promptLabel.configure(fg=self._flaggedColor) # was red else: # same as def if self._flagged or force: self._flagged = False self.promptLabel.configure(fg="black")
If this par's value is different from the default value, it is here that we flag it somehow as such. This basic version simply makes the surrounding text red (or returns it to normal). May be overridden. Leave force at False if you want to allow this method to make smart time-saving decisions about when it can skip recoloring because it is already the right color. Set force to True if you think we got out of sync and need to be fixed.
entailment
def keypress(self, event): """Allow keys typed in widget to select items""" try: self.choice.set(self.shortcuts[event.keysym]) except KeyError: # key not found (probably a bug, since we intend to catch # only events from shortcut keys, but ignore it anyway) pass
Allow keys typed in widget to select items
entailment
def postcmd(self): """Make sure proper entry is activated when menu is posted""" value = self.choice.get() try: index = self.paramInfo.choice.index(value) self.entry.menu.activate(index) except ValueError: # initial null value may not be in list pass
Make sure proper entry is activated when menu is posted
entailment
def convertToNative(self, aVal): """ Convert to native bool; interpret certain strings. """ if aVal is None: return None if isinstance(aVal, bool): return aVal # otherwise interpret strings return str(aVal).lower() in ('1','on','yes','true')
Convert to native bool; interpret certain strings.
entailment
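Illustrative usage of the boolean interpretation above, rewritten as a free function for brevity (in the source it is a method); the rules are copied from the body verbatim:

def convertToNative(aVal):
    # None passes through, real bools pass through, strings match case-insensitively
    if aVal is None:
        return None
    if isinstance(aVal, bool):
        return aVal
    return str(aVal).lower() in ('1', 'on', 'yes', 'true')

for v in (True, 'Yes', 'ON', '0', 'off', None):
    print(repr(v), '->', convertToNative(v))  # True, True, True, False, False, None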
def toggle(self, event=None): """Toggle value between Yes and No""" if self.choice.get() == "yes": self.rbno.select() else: self.rbyes.select() self.widgetEdited()
Toggle value between Yes and No
entailment
def entryCheck(self, event = None, repair = True): """ Ensure any INDEF entry is uppercase, before base class behavior """ valupr = self.choice.get().upper() if valupr.strip() == 'INDEF': self.choice.set(valupr) return EparOption.entryCheck(self, event, repair = repair)
Ensure any INDEF entry is uppercase, before base class behavior
entailment
def xyinterp(x,y,xval): """ :Purpose: Interpolates y based on the given xval. x and y are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. xval is a user-specified value. This routine looks up xval in the x array and uses that information to properly interpolate the value in the y array. Notes ===== Use the searchsorted method on the X array to determine the bin in which xval falls; then use that information to compute the corresponding y value. See Also ======== numpy Parameters ========== x: 1D numpy array independent variable array: MUST BE SORTED y: 1D numpy array dependent variable array xval: float the x value at which you want to know the value of y Returns ======= y: float the value of y corresponding to xval Raises ====== ValueError: If arrays are unequal length; or x array is unsorted; or if xval falls outside the bounds of x (extrapolation is unsupported) :version: 0.1 last modified 2006-07-06 """ #Enforce conditions on x, y, and xval: #x and y must correspond if len(x) != len(y): raise ValueError("Input arrays must be equal lengths") #Extrapolation not supported if xval < x[0]: raise ValueError("Value %f < min(x) %f: Extrapolation unsupported"%(xval,x[0])) if xval > x[-1]: raise ValueError("Value %f > max(x) %f: Extrapolation unsupported"%(xval,x[-1])) #This algorithm only works on sorted data if not (x[1:] >= x[:-1]).all(): raise ValueError("Input array x must be sorted") # Now do the real work. hi = x.searchsorted(xval) lo = hi - 1 try: seg = (float(xval)-x[lo]) / (x[hi] - x[lo]) except ZeroDivisionError: seg = 0.0 yval = y[lo] + seg*(y[hi] - y[lo]) return yval
:Purpose: Interpolates y based on the given xval. x and y are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. xval is a user-specified value. This routine looks up xval in the x array and uses that information to properly interpolate the value in the y array. Notes ===== Use the searchsorted method on the X array to determine the bin in which xval falls; then use that information to compute the corresponding y value. See Also ======== numpy Parameters ========== x: 1D numpy array independent variable array: MUST BE SORTED y: 1D numpy array dependent variable array xval: float the x value at which you want to know the value of y Returns ======= y: float the value of y corresponding to xval Raises ====== ValueError: If arrays are unequal length; or x array is unsorted; or if xval falls outside the bounds of x (extrapolation is unsupported) :version: 0.1 last modified 2006-07-06
entailment
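A quick usage sketch of xyinterp, assuming the function above is in scope; the arrays are made up for illustration:

import numpy as N

x = N.array([0.0, 1.0, 2.0, 3.0])
y = N.array([0.0, 10.0, 20.0, 30.0])
# 1.5 falls between x[1] and x[2], so linear interpolation gives 15.0
print(xyinterp(x, y, 1.5))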
def _setSampleSizeBytes(self): """ updates the current record of the packet size per sample and the relationship between this and the fifo reads. """ self.sampleSizeBytes = self.getPacketSize() if self.sampleSizeBytes > 0: self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes)
updates the current record of the packet size per sample and the relationship between this and the fifo reads.
entailment
def getPacketSize(self): """ the current packet size. :return: the current packet size based on the enabled registers. """ size = 0 if self.isAccelerometerEnabled(): size += 6 if self.isGyroEnabled(): size += 6 if self.isTemperatureEnabled(): size += 2 return size
the current packet size. :return: the current packet size based on the enabled registers.
entailment
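The packet size above determines how many whole samples fit in the MPU-6050's 1024-byte FIFO; a quick check with every sensor enabled:

# accelerometer (6) + gyro (6) + temperature (2) = 14 bytes per sample
sampleSizeBytes = 6 + 6 + 2
print(1024 // sampleSizeBytes)  # 73 complete samples before the FIFO fills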
def initialiseDevice(self): """ performs initialisation of the device :return: """ logger.debug("Initialising device") self.getInterruptStatus() self.setAccelerometerSensitivity(self._accelerationFactor * 32768.0) self.setGyroSensitivity(self._gyroFactor * 32768.0) self.setSampleRate(self.fs) for loop in self.ZeroRegister: self.i2c_io.write(self.MPU6050_ADDRESS, loop, 0) # Sets clock source to gyro reference w/ PLL self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_1, 0b00000010) # Controls frequency of wakeups in accel low power mode plus the sensor standby modes self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_2, 0x00) # Enables any I2C master interrupt source to generate an interrupt self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_INT_ENABLE, 0x01) # enable the FIFO self.enableFifo() logger.debug("Initialised device")
performs initialisation of the device :return:
entailment
def enableAccelerometer(self): """ Specifies the device should write acceleration values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Enabling acceleration sensor") self.fifoSensorMask |= self.enableAccelerometerMask self._accelEnabled = True self._setSampleSizeBytes()
Specifies the device should write acceleration values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def disableAccelerometer(self): """ Specifies the device should NOT write acceleration values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Disabling acceleration sensor") self.fifoSensorMask &= ~self.enableAccelerometerMask self._accelEnabled = False self._setSampleSizeBytes()
Specifies the device should NOT write acceleration values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def enableGyro(self): """ Specifies the device should write gyro values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Enabling gyro sensor") self.fifoSensorMask |= self.enableGyroMask self._gyroEnabled = True self._setSampleSizeBytes()
Specifies the device should write gyro values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def disableGyro(self): """ Specifies the device should NOT write gyro values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Disabling gyro sensor") self.fifoSensorMask &= ~self.enableGyroMask self._gyroEnabled = False self._setSampleSizeBytes()
Specifies the device should NOT write gyro values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def enableTemperature(self): """ Specifies the device should write temperature values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Enabling temperature sensor") self.fifoSensorMask |= self.enableTemperatureMask self._setSampleSizeBytes()
Specifies the device should write temperature values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def disableTemperature(self): """ Specifies the device should NOT write temperature values to the FIFO, is not applied until enableFIFO is called. :return: """ logger.debug("Disabling temperature sensor") self.fifoSensorMask &= ~self.enableTemperatureMask self._setSampleSizeBytes()
Specifies the device should NOT write temperature values to the FIFO, is not applied until enableFIFO is called. :return:
entailment
def setGyroSensitivity(self, value): """ Sets the gyro sensitivity to 250, 500, 1000 or 2000 according to the given value (and implicitly disables the self tests) :param value: the target sensitivity. """ try: self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_GYRO_CONFIG, {250: 0, 500: 8, 1000: 16, 2000: 24}[value]) self._gyroFactor = value / 32768.0 self.gyroSensitivity = value logger.debug("Set gyro sensitivity = %d", value) except KeyError: raise ArgumentError(str(value) + " is not a valid sensitivity (250,500,1000,2000)")
Sets the gyro sensitivity to 250, 500, 1000 or 2000 according to the given value (and implicitly disables the self tests) :param value: the target sensitivity.
entailment
def setAccelerometerSensitivity(self, value): """ Sets the accelerometer sensitivity to 2, 4, 8 or 16 according to the given value. Throws an ArgumentError if the value provided is not valid. :param value: the target sensitivity. """ # note that this implicitly disables the self tests on each axis # i.e. the full byte is actually 000[accel]000 where the 1st 3 are the accelerometer self tests, the next two # values are the actual sensitivity and the last 3 are unused # the 2 [accel] bits are translated by the device as follows; 00 = 2g, 01 = 4g, 10 = 8g, 11 = 16g # in binary we get 2 = 0, 4 = 1000, 8 = 10000, 16 = 11000 # so the 1st 3 bits are always 0 try: self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_ACCEL_CONFIG, {2: 0, 4: 8, 8: 16, 16: 24}[value]) self._accelerationFactor = value / 32768.0 self.accelerometerSensitivity = value logger.debug("Set accelerometer sensitivity = %d", value) except KeyError: raise ArgumentError(str(value) + " is not a valid sensitivity (2,4,8,16)")
Sets the accelerometer sensitivity to 2, 4, 8 or 16 according to the given value. Throws an ArgumentError if the value provided is not valid. :param value: the target sensitivity.
entailment
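Spelling out the register encoding described in the comments above; the dict is the same one the method uses, and the two sensitivity bits land at positions 3-4 of the config byte:

mapping = {2: 0, 4: 8, 8: 16, 16: 24}
for g, reg in mapping.items():
    # 2g -> 00000000, 4g -> 00001000, 8g -> 00010000, 16g -> 00011000
    print('%2dg -> %s' % (g, format(reg, '08b')))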
def setSampleRate(self, targetSampleRate): """ Sets the internal sample rate of the MPU-6050; this requires writing a value to the device to set the sample rate as Gyroscope Output Rate / (1 + SMPLRT_DIV) where the gyroscope outputs at 8kHz and the peak sampling rate is 1kHz. The target sample rate is therefore capped at 1kHz. :param targetSampleRate: the target sample rate. :return: """ sampleRateDenominator = int((8000 / min(targetSampleRate, 1000)) - 1) self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_SMPLRT_DIV, sampleRateDenominator) self.fs = 8000.0 / (sampleRateDenominator + 1.0) logger.debug("Set sample rate = %d", self.fs)
Sets the internal sample rate of the MPU-6050; this requires writing a value to the device to set the sample rate as Gyroscope Output Rate / (1 + SMPLRT_DIV) where the gyroscope outputs at 8kHz and the peak sampling rate is 1kHz. The target sample rate is therefore capped at 1kHz. :param targetSampleRate: the target sample rate. :return:
entailment
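The divider arithmetic from setSampleRate, worked through for a hypothetical 500Hz target:

# 8000 / (1 + SMPLRT_DIV) = 500  ->  SMPLRT_DIV = 15
targetSampleRate = 500
sampleRateDenominator = int((8000 / min(targetSampleRate, 1000)) - 1)
print(sampleRateDenominator, 8000.0 / (sampleRateDenominator + 1.0))  # 15 500.0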
def resetFifo(self): """ Resets the FIFO by first disabling the FIFO then sending a FIFO_RESET and then re-enabling the FIFO. :return: """ logger.debug("Resetting FIFO") self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b00000000) self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b00000100) self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b01000000) self.getInterruptStatus()
Resets the FIFO by first disabling the FIFO then sending a FIFO_RESET and then re-enabling the FIFO. :return:
entailment
def enableFifo(self): """ Enables the FIFO, resets it and then sets which values should be written to the FIFO. :return: """ logger.debug("Enabling FIFO") self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_EN, 0) self.resetFifo() self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_EN, self.fifoSensorMask) logger.debug("Enabled FIFO")
Enables the FIFO, resets it and then sets which values should be written to the FIFO. :return:
entailment
def getFifoCount(self): """ gets the amount of data available on the FIFO right now. :return: the number of bytes available on the FIFO which will be proportional to the number of samples available based on the values the device is configured to sample. """ data = self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_COUNTH, 2) count = (data[0] << 8) + data[1] logger.debug("FIFO Count: %d", count) return count
gets the amount of data available on the FIFO right now. :return: the number of bytes available on the FIFO which will be proportional to the number of samples available based on the values the device is configured to sample.
entailment
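How the two count bytes combine into one value, with a made-up register read standing in for i2c_io.readBlock:

# FIFO_COUNTH arrives first (high byte), then FIFO_COUNTL
data = [0x02, 0x80]
count = (data[0] << 8) + data[1]
print(count)  # 640 bytes waiting in the FIFO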
def getDataFromFIFO(self, bytesToRead): """ reads the specified number of bytes from the FIFO, should be called after a call to getFifoCount to ensure there is new data available (to avoid reading duplicate data). :param bytesToRead: the number of bytes to read. :return: the bytes read. """ return self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_R_W, bytesToRead)
reads the specified number of bytes from the FIFO, should be called after a call to getFifoCount to ensure there is new data available (to avoid reading duplicate data). :param bytesToRead: the number of bytes to read. :return: the bytes read.
entailment
def provideData(self): """ reads a batchSize batch of data from the FIFO while attempting to optimise the number of times we have to read from the device itself. :return: a list of data where each item is a single sample of data converted into real values and stored as a dict. """ samples = [] fifoBytesAvailable = 0 fifoWasReset = False logger.debug(">> provideData target %d samples", self.samplesPerBatch) iterations = 0 # allow 1.5x the expected duration of the batch breakTime = time() + ((self.samplesPerBatch / self.fs) * 1.5) overdue = False while len(samples) < self.samplesPerBatch and not overdue: iterations += 1 if iterations > self.samplesPerBatch and iterations % 100 == 0: if time() > breakTime: logger.warning("Breaking measurement after %d iterations, batch overdue", iterations) overdue = True if fifoBytesAvailable < self.sampleSizeBytes or fifoWasReset: interrupt = self.getInterruptStatus() fifoBytesAvailable = self.getFifoCount() fifoWasReset = False logger.debug("Start sample loop [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes) if interrupt & 0x10: logger.error("FIFO OVERFLOW, RESETTING [available: %d , interrupt: %d]", fifoBytesAvailable, interrupt) self.measurementOverflowed = True self.resetFifo() fifoWasReset = True elif fifoBytesAvailable == 1024: logger.error("FIFO FULL, RESETTING [available: %d , interrupt: %d]", fifoBytesAvailable, interrupt) self.measurementOverflowed = True self.resetFifo() fifoWasReset = True elif interrupt & 0x02 or interrupt & 0x01: # wait for at least 1 sample to arrive, should be a VERY short wait while fifoBytesAvailable < self.sampleSizeBytes: logger.debug("Waiting for sample [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes) fifoBytesAvailable = self.getFifoCount() logger.debug("Processing data [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes) fifoReadBytes = self.sampleSizeBytes # TODO this chunk of code is a bit messy, tidy it up # if we have more than 1 sample available then ensure we read as many as we can at once (albeit within # the limits of the max i2c read size of 32 bytes) if fifoBytesAvailable > self.sampleSizeBytes: fifoReadBytes = min(fifoBytesAvailable // self.sampleSizeBytes, self.maxBytesPerFifoRead) * self.sampleSizeBytes logger.debug("Excess bytes to read [available: %d , reading: %d]", fifoBytesAvailable, fifoReadBytes) # but don't read more than we need to fulfil the batch samplesToRead = fifoReadBytes // self.sampleSizeBytes excessSamples = self.samplesPerBatch - len(samples) - samplesToRead if excessSamples < 0: samplesToRead += excessSamples fifoReadBytes = int(samplesToRead * self.sampleSizeBytes) logger.debug("Excess samples to read [available: %d , reading: %d]", fifoBytesAvailable, fifoReadBytes) else: logger.debug("Reading [available: %d , reading: %d]", fifoBytesAvailable, fifoReadBytes) # read the bytes from the fifo, break it into sample sized chunks and convert to the actual values fifoBytes = self.getDataFromFIFO(fifoReadBytes) samples.extend([self.unpackSample(fifoBytes[i:i + self.sampleSizeBytes]) for i in range(0, len(fifoBytes), self.sampleSizeBytes)]) # track the count here so we can avoid going back to the FIFO each time fifoBytesAvailable -= fifoReadBytes logger.debug("End sample loop [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes) logger.debug("<< provideData %d samples", len(samples)) return samples
reads a batchSize batch of data from the FIFO while attempting to optimise the number of times we have to read from the device itself. :return: a list of data where each item is a single sample of data converted into real values and stored as a dict.
entailment
def unpackSample(self, rawData): """ unpacks a single sample of data (where sample length is based on the currently enabled sensors). :param rawData: the data to convert :return: a converted data set. """ length = len(rawData) # TODO error if not multiple of 2 # logger.debug(">> unpacking sample %d length %d", self._sampleIdx, length) unpacked = struct.unpack(">" + ('h' * (length // 2)), memoryview(bytearray(rawData)).tobytes()) # store the data in a dictionary mpu6050 = collections.OrderedDict() mpu6050[SAMPLE_TIME] = self._sampleIdx / self.fs sensorIdx = 0 if self.isAccelerometerEnabled(): mpu6050[ACCEL_X] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 mpu6050[ACCEL_Y] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 mpu6050[ACCEL_Z] = unpacked[sensorIdx] * self._accelerationFactor sensorIdx += 1 if self.isTemperatureEnabled(): mpu6050[TEMP] = unpacked[sensorIdx] * self._temperatureGain + self._temperatureOffset sensorIdx += 1 if self.isGyroEnabled(): mpu6050[GYRO_X] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 mpu6050[GYRO_Y] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 mpu6050[GYRO_Z] = unpacked[sensorIdx] * self._gyroFactor sensorIdx += 1 # TODO should we send as a dict so the keys are available? output = list(mpu6050.values()) self._sampleIdx += 1 # logger.debug("<< unpacked sample length %d into vals size %d", length, len(output)) return output
unpacks a single sample of data (where sample length is based on the currently enabled sensors). :param rawData: the data to convert :return: a converted data set.
entailment
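A self-contained sketch of the decode step in unpackSample: pack a hypothetical 14-byte sample (3 accel, 1 temp, 3 gyro big-endian int16s, in the order the method reads them) and unpack it with the same format string the method builds:

import struct

raw = struct.pack(">7h", 16384, 0, -16384, 1000, 250, -250, 0)
values = struct.unpack(">" + "h" * (len(raw) // 2), raw)
# at +/-2g sensitivity the factor is 2 / 32768, so 16384 counts -> 1.0g
accelerationFactor = 2 / 32768.0
print(values[0] * accelerationFactor)  # 1.0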
def wrap(text, width, *args, **kwargs): """ Like :func:`textwrap.wrap` but preserves existing newlines which :func:`textwrap.wrap` does not otherwise handle well. See Also -------- :func:`textwrap.wrap` """ return sum([textwrap.wrap(line, width, *args, **kwargs) if line else [''] for line in text.splitlines()], [])
Like :func:`textwrap.wrap` but preserves existing newlines which :func:`textwrap.wrap` does not otherwise handle well. See Also -------- :func:`textwrap.wrap`
entailment
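The difference from plain textwrap.wrap, assuming the wrap function above is in scope: textwrap collapses the blank line, while wrap preserves it:

import textwrap

text = "first paragraph\n\nsecond paragraph"
print(textwrap.wrap(text, 20))  # ['first paragraph', 'second paragraph'] - blank line lost
print(wrap(text, 20))           # ['first paragraph', '', 'second paragraph'] - blank line kept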
def textbox(text, width=78, boxchar='#', indent=0): """ Outputs line-wrapped text wrapped in a box drawn with a repeated (usually ASCII) character. For example: >>> print(textbox('Text to wrap', width=16)) ################ # # # Text to wrap # # # ################ Parameters ---------- text : string The text to wrap width : int The width of the entire box, including the perimeter and the indentation space. Because the wrapped text is padded with an additional column of whitespace on each side, the minimum width is 5--any width less than that is automatically increased to 5 (default: 78) boxchar : string (No pun intended.) The character to draw the box with. May also be a string of multiple characters (default: '#') indent : int Amount of space by which the box should be indented. (default: 0) """ min_width = len(boxchar) * 2 + 3 width = max(width-indent, min_width) indentspace = indent * ' ' wrap_width = width - min_width + 1 q, r = divmod(width, len(boxchar)) # The top/bottom border top_border = indentspace + boxchar * q + boxchar[:r] top_padding = indentspace + boxchar + ' ' * (width - len(boxchar) * 2) + boxchar lines = ['%s%s %s %s' % (indentspace, boxchar, line.ljust(wrap_width), boxchar) for line in wrap(text, wrap_width)] top = [top_border, top_padding] bottom = [top_padding, top_border] return '\n'.join(top + lines + bottom)
Outputs line-wrapped text wrapped in a box drawn with a repeated (usually ASCII) character. For example: >>> print(textbox('Text to wrap', width=16)) ################ # # # Text to wrap # # # ################ Parameters ---------- text : string The text to wrap width : int The width of the entire box, including the perimeter and the indentation space. Because the wrapped text is padded with an additional column of whitespace on each side, the minimum width is 5--any width less than that is automatically increased to 5 (default: 78) boxchar : string (No pun intended.) The character to draw the box with. May also be a string of multiple characters (default: '#') indent : int Amount of space by which the box should be indented. (default: 0)
entailment
def main(): """Entrypoint function.""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', help='Hydro Quebec username') parser.add_argument('-p', '--password', help='Password') parser.add_argument('-j', '--json', action='store_true', default=False, help='Json output') parser.add_argument('-i', '--influxdb', action='store_true', default=False, help='InfluxDb output') parser.add_argument('-c', '--contract', default=None, help='Contract number') parser.add_argument('-l', '--list-contracts', action='store_true', default=False, help='List all your contracts') parser.add_argument('-H', '--hourly', action='store_true', default=False, help='Show yesterday hourly consumption') parser.add_argument('-t', '--timeout', default=REQUESTS_TIMEOUT, help='Request timeout') parser.add_argument('-V', '--version', action='store_true', default=False, help='Show version') raw_group = parser.add_argument_group('Detailled-energy raw download option') raw_group.add_argument('--detailled-energy', action='store_true', default=False, help='Get raw json output download') raw_group.add_argument('--start-date', default=(datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)).strftime("%Y-%m-%d"), help='Start date for detailled-output') raw_group.add_argument('--end-date', default=datetime.datetime.now(HQ_TIMEZONE).strftime("%Y-%m-%d"), help="End date for detailled-output") args = parser.parse_args() if args.version: print(VERSION) return 0 if not args.username or not args.password: parser.print_usage() print("pyhydroquebec: error: the following arguments are required: " "-u/--username, -p/--password") return 3 client = HydroQuebecClient(args.username, args.password, args.timeout) loop = asyncio.get_event_loop() if args.detailled_energy is False: async_func = client.fetch_data() else: start_date = datetime.datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.datetime.strptime(args.end_date, '%Y-%m-%d') async_func = client.fetch_data_detailled_energy_use(start_date, end_date) try: fut = asyncio.wait([async_func]) loop.run_until_complete(fut) except BaseException as exp: print(exp) return 1 finally: close_fut = asyncio.wait([client.close_session()]) loop.run_until_complete(close_fut) if not client.get_data(): return 2 if args.list_contracts: print("Contracts: {}".format(", ".join(client.get_contracts()))) elif args.influxdb: output_influx(client.get_data(args.contract)) elif args.json or args.detailled_energy: output_json(client.get_data(args.contract)) else: output_text(args.username, client.get_data(args.contract), args.hourly) return 0
Entrypoint function.
entailment
def easter(year): '''Calculate western easter''' # formula taken from http://aa.usno.navy.mil/faq/docs/easter.html c = trunc(year / 100) n = year - 19 * trunc(year / 19) k = trunc((c - 17) / 25) i = c - trunc(c / 4) - trunc((c - k) / 3) + (19 * n) + 15 i = i - 30 * trunc(i / 30) i = i - trunc(i / 28) * (1 - trunc(i / 28) * trunc(29 / (i + 1)) * trunc((21 - n) / 11)) j = year + trunc(year / 4) + i + 2 - c + trunc(c / 4) j = j - 7 * trunc(j / 7) l = i - j month = 3 + trunc((l + 40) / 44) day = l + 28 - 31 * trunc(month / 4) return year, int(month), int(day)
Calculate western easter
entailment
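Sanity-checking the USNO formula against a known date, assuming easter and trunc from above are in scope; Easter Sunday in 2000 fell on 23 April, and the formula reproduces it:

print(easter(2000))  # (2000, 4, 23)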
def independence_day(year, observed=None): '''July 4th''' day = 4 if observed: if calendar.weekday(year, JUL, 4) == SAT: day = 3 if calendar.weekday(year, JUL, 4) == SUN: day = 5 return (year, JUL, day)
July 4th
entailment
def columbus_day(year, country='usa'): '''in USA: 2nd Monday in Oct Elsewhere: Oct 12''' if country == 'usa': return nth_day_of_month(2, MON, OCT, year) else: return (year, OCT, 12)
in USA: 2nd Monday in Oct Elsewhere: Oct 12
entailment
def thanksgiving(year, country='usa'): '''USA: last Thurs. of November, Canada: 2nd Mon. of October''' if country == 'usa': if year in [1940, 1941]: return nth_day_of_month(3, THU, NOV, year) elif year == 1939: return nth_day_of_month(4, THU, NOV, year) else: return nth_day_of_month(0, THU, NOV, year) if country == 'canada': return nth_day_of_month(2, MON, OCT, year)
USA: last Thurs. of November, Canada: 2nd Mon. of October
entailment
def linefit(x, y, weights=None): """ Parameters ---------- y: 1D numpy array The data to be fitted x: 1D numpy array The x values of the y array. x and y must have the same shape. weights: 1D numpy array, must have the same shape as x and y weight values Examples -------- >>> import numpy as N >>> from numpy.core import around >>> x = N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5]) >>> y = N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18]) >>> around(linefit(x,y), decimals=5) array([9.27273, 1.43636]) >>> x = N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7]) >>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9]) >>> around(linefit(x,y), decimals=5) array([1.42564, 0.31579]) """ if len(x) != len(y): print("Error: X and Y must have equal size\n") return n = len(x) w = N.zeros((n,n)).astype(N.float) if weights is None: for i in N.arange(n): w[i,i] = 1 else: if len(weights) != n: print("Error: Weights must have the same size as X and Y.\n") return for i in N.arange(n): w[i,i] = weights[i] x = x.astype(N.float) y = y.astype(N.float) # take the weighted avg for calculating the covariance Xavg = N.sum(N.dot(w,x)) / N.sum(w.diagonal()) Yavg = N.sum(N.dot(w,y)) / N.sum(w.diagonal()) xm = x - Xavg xmt = N.transpose(xm) ym = y - Yavg b1 = N.dot(xmt,N.dot(w,ym)) / N.dot(xmt ,N.dot(w,xm)) b0 = Yavg - b1 * Xavg return b0, b1
Parameters ---------- y: 1D numpy array The data to be fitted x: 1D numpy array The x values of the y array. x and y must have the same shape. weights: 1D numpy array, must have the same shape as x and y weight values Examples -------- >>> import numpy as N >>> from numpy.core import around >>> x = N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5]) >>> y = N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18]) >>> around(linefit(x,y), decimals=5) array([9.27273, 1.43636]) >>> x = N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7]) >>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9]) >>> around(linefit(x,y), decimals=5) array([1.42564, 0.31579])
entailment
def get(self, measurementId): """ Analyses the measurement with the given parameters :param measurementId: :return: """ logger.info('Analysing ' + measurementId) measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE) if measurement is not None: if measurement.inflate(): data = { name: { 'spectrum': { 'x': self._jsonify(data.spectrum('x')), 'y': self._jsonify(data.spectrum('y')), 'z': self._jsonify(data.spectrum('z')), 'sum': self._jsonify(data.spectrum('sum')) }, 'psd': { 'x': self._jsonify(data.psd('x')), 'y': self._jsonify(data.psd('y')), 'z': self._jsonify(data.psd('z')) }, 'peakSpectrum': { 'x': self._jsonify(data.peakSpectrum('x')), 'y': self._jsonify(data.peakSpectrum('y')), 'z': self._jsonify(data.peakSpectrum('z')), 'sum': self._jsonify(data.peakSpectrum('sum')) } } for name, data in measurement.data.items() } return data, 200 else: return None, 404 else: return None, 404
Analyses the measurement with the given parameters :param measurementId: :return:
entailment
def _applyTargetState(targetState, md, httpclient): """ compares the current device state against the targetStateProvider and issues updates as necessary to ensure the device is at that state. :param md: :param targetState: the target state. :param httpclient: the http client :return: """ anyUpdate = False if md['fs'] != targetState.fs: logger.info("Updating fs from " + str(md['fs']) + " to " + str(targetState.fs) + " for " + md['name']) anyUpdate = True if md['samplesPerBatch'] != targetState.samplesPerBatch: logger.info("Updating samplesPerBatch from " + str(md['samplesPerBatch']) + " to " + str( targetState.samplesPerBatch) + " for " + md['name']) anyUpdate = True if md['gyroEnabled'] != targetState.gyroEnabled: logger.info("Updating gyroEnabled from " + str(md['gyroEnabled']) + " to " + str( targetState.gyroEnabled) + " for " + md['name']) anyUpdate = True if md['gyroSens'] != targetState.gyroSens: logger.info( "Updating gyroSens from " + str(md['gyroSens']) + " to " + str(targetState.gyroSens) + " for " + md[ 'name']) anyUpdate = True if md['accelerometerEnabled'] != targetState.accelerometerEnabled: logger.info("Updating accelerometerEnabled from " + str(md['accelerometerEnabled']) + " to " + str( targetState.accelerometerEnabled) + " for " + md['name']) anyUpdate = True if md['accelerometerSens'] != targetState.accelerometerSens: logger.info("Updating accelerometerSens from " + str(md['accelerometerSens']) + " to " + str( targetState.accelerometerSens) + " for " + md['name']) anyUpdate = True if anyUpdate: payload = marshal(targetState, targetStateFields) logger.info("Applying target state change " + md['name'] + " - " + str(payload)) if RecordingDeviceStatus.INITIALISED.name == md.get('status'): try: httpclient.patch(md['serviceURL'], json=payload) except Exception as e: logger.exception(e) else: logger.warning("Ignoring target state change until " + md['name'] + " is idle, currently " + md['status']) else: logger.debug("Device " + md['name'] + " is at target state, we continue")
compares the current device state against the targetStateProvider and issues updates as necessary to ensure the device is at that state. :param md: :param targetState: the target state. :param httpclient: the http client :return:
entailment
def updateDeviceState(self, device): """ Updates the target state on the specified device. :param device: the device to update. :return: """ # this is only threadsafe because the targetstate is effectively immutable, if it becomes mutable in future then # funkiness may result self._reactor.offer(REACH_TARGET_STATE, [self._targetStateProvider.state, device, self._httpclient])
Updates the target state on the specified device. :param device: the device to update. :return:
entailment
def updateTargetState(self, newState): """ Updates the system target state and propagates that to all devices. :param newState: :return: """ self._targetStateProvider.state = loadTargetState(newState, self._targetStateProvider.state) for device in self.deviceController.getDevices(): self.updateDeviceState(device.payload)
Updates the system target state and propagates that to all devices. :param newState: :return:
entailment
def convert(input, width=132, output=None, keep=False): """Input ASCII trailer file "input" will be read. The contents will then be written out to a FITS file in the same format as used by 'stwfits' from IRAF. Parameters =========== input : str Filename of input ASCII trailer file width : int Number of characters wide to use for defining output FITS column [Default: 132] output : str Filename to use for writing out converted FITS trailer file If None, input filename will be converted from *.tra -> *_trl.fits [Default: None] keep : bool Specifies whether or not to keep any previously written FITS files [Default: False] """ # open input trailer file trl = open(input) # process all lines lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)]) # close ASCII trailer file now that we have processed all the lines trl.close() if output is None: # create fits file rootname,suffix = os.path.splitext(input) s = suffix[1:].replace('ra','rl') fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep) else: fitsname = output full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname)) old_file = os.path.exists(full_name) if old_file: if keep: print("ERROR: Trailer file already written out as: {}".format(full_name)) raise IOError else: os.remove(full_name) # Build FITS table and write it out line_fmt = "{}A".format(width) tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)]) tbhdu.writeto(fitsname) print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name)) os.remove(input)
Input ASCII trailer file "input" will be read. The contents will then be written out to a FITS file in the same format as used by 'stwfits' from IRAF. Parameters =========== input : str Filename of input ASCII trailer file width : int Number of characters wide to use for defining output FITS column [Default: 132] output : str Filename to use for writing out converted FITS trailer file If None, input filename will be converted from *.tra -> *_trl.fits [Default: None] keep : bool Specifies whether or not to keep any previously written FITS files [Default: False]
entailment
def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return results if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results
An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*.
entailment
def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out
Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list.
entailment
def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None and not isinstance(val, Section): break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None and not isinstance(val, Section): break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section
Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found.
entailment
def pop(self, key, default=MISSING): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ try: val = self[key] except KeyError: if default is MISSING: raise val = default else: del self[key] return val
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised'
entailment
def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val
Pops the first (key,val)
entailment
def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = []
A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected
entailment
def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return list(zip((self.scalars + self.sections), list(self.values())))
D.items() -> list of D's (key, value) pairs, as 2-tuples
entailment
def dict(self): """ Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. >>> n = a.dict() # doctest: +SKIP >>> n == a # doctest: +SKIP 1 >>> n is a # doctest: +SKIP 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict
Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. >>> n = a.dict() # doctest: +SKIP >>> n == a # doctest: +SKIP 1 >>> n is a # doctest: +SKIP 0
entailment
def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in list(indict.items()): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) else: self[key] = val
A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
entailment
def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment
Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments.
entailment
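A minimal usage sketch for ``rename`` (assuming a standard configobj install; the key names are illustrative). Because the rename happens in place, the key keeps its slot in the ``scalars`` ordering:

from configobj import ConfigObj

cfg = ConfigObj()
cfg['first'] = '1'
cfg['second'] = '2'
cfg.rename('first', 'renamed')
# the renamed key keeps position 0 in the ordering
assert cfg.scalars == ['renamed', 'second']
assert cfg['renamed'] == '1'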
def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs):
    """
    Walk every member and call a function on the keyword and value.

    Return a dictionary of the return values

    If the function raises an exception, raise the error
    unless ``raise_errors=False``, in which case set the return value to
    ``False``.

    Any unrecognised keyword arguments you pass to walk will be passed on
    to the function you pass in.

    Note: if ``call_on_sections`` is ``True`` then - on encountering a
    subsection, *first* the function is called for the *whole* subsection,
    and then recurses into its members. This means your function must be
    able to handle strings, dictionaries and lists. This allows you to
    change the key of subsections as well as for ordinary members. The
    return value when called on the whole subsection has to be discarded.

    See the encode and decode methods for examples, including functions.

    .. admonition:: caution

        You can use ``walk`` to transform the names of members of a section
        but you mustn't add or delete members.

    >>> config = '''[XXXXsection]
    ... XXXXkey = XXXXvalue'''.splitlines()
    >>> cfg = ConfigObj(config)
    >>> cfg
    ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
    >>> def transform(section, key):
    ...     val = section[key]
    ...     newkey = key.replace('XXXX', 'CLIENT1')
    ...     section.rename(key, newkey)
    ...     if isinstance(val, (tuple, list, dict)):
    ...         pass
    ...     else:
    ...         val = val.replace('XXXX', 'CLIENT1')
    ...         section[newkey] = val
    >>> cfg.walk(transform, call_on_sections=True)
    {'CLIENT1section': {'CLIENT1key': None}}
    >>> cfg
    ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
    """
    out = {}
    # scalars first
    for i in range(len(self.scalars)):
        entry = self.scalars[i]
        try:
            val = function(self, entry, **keywargs)
            # bound again in case name has changed
            entry = self.scalars[i]
            out[entry] = val
        except Exception:
            if raise_errors:
                raise
            else:
                entry = self.scalars[i]
                out[entry] = False
    # then sections
    for i in range(len(self.sections)):
        entry = self.sections[i]
        if call_on_sections:
            try:
                function(self, entry, **keywargs)
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.sections[i]
                    out[entry] = False
            # bound again in case name has changed
            entry = self.sections[i]
        # previous result is discarded
        out[entry] = self[entry].walk(
            function,
            raise_errors=raise_errors,
            call_on_sections=call_on_sections,
            **keywargs)
    return out
Walk every member and call a function on the keyword and value. Return a dictionary of the return values If the function raises an exception, raise the error unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk will be passed on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into its members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. .. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
entailment
def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, string_types): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val)
Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0
entailment
def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1] """ result = self[key] if isinstance(result, (tuple, list)): return list(result) return [result]
A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1]
entailment
def restore_default(self, key): """ Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised. """ default = self.default_values[key] dict.__setitem__(self, key, default) if key not in self.defaults: self.defaults.append(key) return default
Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised.
entailment
def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults()
Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values.
entailment
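A short sketch of the restore-default workflow (assuming the companion validate module is available; the 'port' key and its spec are invented). The defaults only exist after a validation pass has populated ``default_values``:

from configobj import ConfigObj
from validate import Validator

spec = ['port = integer(default=8080)']
cfg = ConfigObj(['port = 9000'], configspec=spec)
cfg.validate(Validator())           # records the default for 'port'
cfg['port'] = 1234
print(cfg.restore_default('port'))  # 8080 - also written back into cfg
cfg.restore_defaults()              # recursively resets every defaulted key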
def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in list(BOMS.items()): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in list(BOMS.items()): if not isinstance(BOM, str) or not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, string_types): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no encoding specified, just return if isinstance(infile, string_types): # infile read from a file will be a single string return infile.splitlines(True) return infile
Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string.
entailment
def _decode(self, infile, encoding):
    """
    Decode infile to unicode, using the specified encoding.

    If it is a string, it also needs converting to a list.
    """
    if isinstance(infile, string_types):
        # can't be unicode
        # NOTE: Could raise a ``UnicodeDecodeError``
        return infile.decode(encoding).splitlines(True)
    for i, line in enumerate(infile):
        # NOTE: The isinstance test here handles mixed lists of unicode/string
        # NOTE: But the decode will break on any non-string values
        # NOTE: Or could raise a ``UnicodeDecodeError``
        if PY3K:
            if not isinstance(line, str):
                infile[i] = line.decode(encoding)
        else:
            if not isinstance(line, unicode):
                infile[i] = line.decode(encoding)
    return infile
Decode infile to unicode, using the specified encoding. If it is a string, it also needs converting to a list.
entailment
def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line
Decode element to unicode if necessary.
entailment
def _parse(self, infile):
    """Actually parse the config file."""
    temp_list_values = self.list_values
    if self.unrepr:
        self.list_values = False

    comment_list = []
    done_start = False
    this_section = self
    maxline = len(infile) - 1
    cur_index = -1
    reset_comment = False

    while cur_index < maxline:
        if reset_comment:
            comment_list = []
        cur_index += 1
        line = infile[cur_index]
        sline = line.strip()
        # do we have anything on the line ?
        if not sline or sline.startswith('#'):
            reset_comment = False
            comment_list.append(line)
            continue

        if not done_start:
            # preserve initial comment
            self.initial_comment = comment_list
            comment_list = []
            done_start = True

        reset_comment = True
        # first we check if it's a section marker
        mat = self._sectionmarker.match(line)
        if mat is not None:
            # is a section line
            (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            cur_depth = sect_open.count('[')
            if cur_depth != sect_close.count(']'):
                self._handle_error("Cannot compute the section depth at line %s.",
                                   NestingError, infile, cur_index)
                continue

            if cur_depth < this_section.depth:
                # the new section is dropping back to a previous level
                try:
                    parent = self._match_depth(this_section, cur_depth).parent
                except SyntaxError:
                    self._handle_error("Cannot compute nesting level at line %s.",
                                       NestingError, infile, cur_index)
                    continue
            elif cur_depth == this_section.depth:
                # the new section is a sibling of the current section
                parent = this_section.parent
            elif cur_depth == this_section.depth + 1:
                # the new section is a child of the current section
                parent = this_section
            else:
                self._handle_error("Section too nested at line %s.",
                                   NestingError, infile, cur_index)

            sect_name = self._unquote(sect_name)
            if sect_name in parent:
                self._handle_error('Duplicate section name at line %s.',
                                   DuplicateError, infile, cur_index)
                continue

            # create the new section
            this_section = Section(
                parent,
                cur_depth,
                self,
                name=sect_name)
            parent[sect_name] = this_section
            parent.inline_comments[sect_name] = comment
            parent.comments[sect_name] = comment_list
            continue
        #
        # it's not a section marker,
        # so it should be a valid ``key = value`` line
        mat = self._keyword.match(line)
        if mat is None:
            # it neither matched as a keyword
            # or a section marker
            self._handle_error(
                'Invalid line at line "%s".',
                ParseError, infile, cur_index)
        else:
            # is a keyword value
            # value will include any inline comment
            (indent, key, value) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            # check for a multiline value
            if value[:3] in ['"""', "'''"]:
                try:
                    value, comment, cur_index = self._multiline(
                        value, infile, cur_index, maxline)
                except SyntaxError:
                    self._handle_error(
                        'Parse error in value at line %s.',
                        ParseError, infile, cur_index)
                    continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception as e:
                            if type(e) == UnknownType:
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                               cur_index)
                            continue
            else:
                if self.unrepr:
                    comment = ''
                    try:
                        value = unrepr(value)
                    except Exception as e:
                        if isinstance(e, UnknownType):
                            msg = 'Unknown name or type in value at line %s.'
                        else:
                            msg = 'Parse error in value at line %s.'
                        self._handle_error(msg, UnreprError, infile,
                                           cur_index)
                        continue
                else:
                    # extract comment and lists
                    try:
                        (value, comment) = self._handle_value(value)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
        #
        key = self._unquote(key)
        if key in this_section:
            self._handle_error(
                'Duplicate keyword name at line %s.',
                DuplicateError, infile, cur_index)
            continue
        # add the key.
        # we set unrepr because if we have got this far we will never
        # be creating a new section
        this_section.__setitem__(key, value, unrepr=True)
        this_section.inline_comments[key] = comment
        this_section.comments[key] = comment_list
        continue
    #
    if self.indent_type is None:
        # no indentation used, set the type accordingly
        self.indent_type = ''

    # preserve the final comment
    if not self and not self.initial_comment:
        self.initial_comment = comment_list
    elif not reset_comment:
        self.final_comment = comment_list
    self.list_values = temp_list_values
Actually parse the config file.
entailment
def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError()
Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError.
entailment
def _handle_error(self, text, ErrorClass, infile, cur_index):
    """
    Handle an error according to the error settings.

    Either raise the error or store it.
    The error will have occurred at ``cur_index``
    """
    line = infile[cur_index]
    cur_index += 1
    message = text % cur_index
    error = ErrorClass(message, cur_index, line)
    if self.raise_errors:
        # raise the error - parsing stops here
        raise error
    # store the error
    # reraise when parsing has finished
    self._errors.append(error)
Handle an error according to the error settings. Either raise the error or store it. The error will have occurred at ``cur_index``
entailment
def _unquote(self, value): """Return an unquoted version of a value""" if not value: # should only happen during parsing of lists raise SyntaxError if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value
Return an unquoted version of a value
entailment
def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, string_types): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value
Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted.
entailment
def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment)
Given a value string, unquote, remove comment, handle lists. (including empty and single member lists)
entailment
def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index)
Extract the value, where we are in a multiline situation.
entailment
def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. ``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError as e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError as e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec
Parse the configspec.
entailment
def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} section[entry]._created = True if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry]
Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__
entailment
def _write_line(self, indent_string, entry, this_entry, comment):
    """Write an individual line, for the write method"""
    # NOTE: the calls to self._quote here handle non-StringType values.
    if not self.unrepr:
        val = self._decode_element(self._quote(this_entry))
    else:
        val = repr(this_entry)
    return '%s%s%s%s%s' % (indent_string,
                           self._decode_element(self._quote(entry, multiline=False)),
                           self._a_to_u(' = '),
                           val,
                           self._decode_element(comment))
Write an individual line, for the write method
entailment
def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment))
Write a section marker line
entailment
def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment)
Deal with a comment.
entailment
def write(self, outfile=None, section=None):
    """
    Write the current ConfigObj as a file

    tekNico: FIXME: use StringIO instead of real files

    >>> filename = a.filename # doctest: +SKIP
    >>> a.filename = 'test.ini' # doctest: +SKIP
    >>> a.write() # doctest: +SKIP
    >>> a.filename = filename # doctest: +SKIP
    >>> a == ConfigObj('test.ini', raise_errors=True) # doctest: +SKIP
    1
    >>> import os # doctest: +SKIP
    >>> os.remove('test.ini') # doctest: +SKIP
    """
    if self.indent_type is None:
        # this can be true if initialised from a dictionary
        self.indent_type = DEFAULT_INDENT_TYPE

    out = []
    cs = self._a_to_u('#')
    csp = self._a_to_u('# ')
    if section is None:
        int_val = self.interpolation
        self.interpolation = False
        section = self
        for line in self.initial_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)

    indent_string = self.indent_type * section.depth
    for entry in (section.scalars + section.sections):
        if entry in section.defaults:
            # don't write out default values
            continue
        for comment_line in section.comments[entry]:
            comment_line = self._decode_element(comment_line.lstrip())
            if comment_line and not comment_line.startswith(cs):
                comment_line = csp + comment_line
            out.append(indent_string + comment_line)
        this_entry = section[entry]
        comment = self._handle_comment(section.inline_comments[entry])

        if isinstance(this_entry, dict):
            # a section
            out.append(self._write_marker(
                indent_string,
                this_entry.depth,
                entry,
                comment))
            out.extend(self.write(section=this_entry))
        else:
            out.append(self._write_line(
                indent_string,
                entry,
                this_entry,
                comment))

    if section is self:
        for line in self.final_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
        self.interpolation = int_val

    if section is not self:
        return out

    if (self.filename is None) and (outfile is None):
        # output a list of lines
        # might need to encode
        # NOTE: This will *screw* UTF16, each line will start with the BOM
        if self.encoding:
            out = [l.encode(self.encoding) for l in out]
        if (self.BOM and ((self.encoding is None) or
                          (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
            # Add the UTF8 BOM
            if not out:
                out.append('')
            out[0] = BOM_UTF8 + out[0]
        return out

    # Turn the list to a string, joined with correct newlines
    newline = self.newlines or os.linesep
    if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
            and sys.platform == 'win32' and newline == '\r\n'):
        # Windows specific hack to avoid writing '\r\r\n'
        newline = '\n'
    output = self._a_to_u(newline).join(out)
    if self.encoding:
        output = output.encode(self.encoding)
    if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
        # Add the UTF8 BOM
        output = BOM_UTF8 + output

    if not output.endswith(newline):
        output += newline
    if outfile is not None:
        outfile.write(output)
    else:
        # !!! write mode was 'wb' but that fails in PY3K and we don't need
        h = open(self.filename, 'w')
        h.write(output)
        h.close()
Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename # doctest: +SKIP >>> a.filename = 'test.ini' # doctest: +SKIP >>> a.write() # doctest: +SKIP >>> a.filename = filename # doctest: +SKIP >>> a == ConfigObj('test.ini', raise_errors=True) # doctest: +SKIP 1 >>> import os # doctest: +SKIP >>> os.remove('test.ini') # doctest: +SKIP
entailment
def validate(self, validator, preserve_errors=False, copy=False, section=None):
    """
    Test the ConfigObj against a configspec.

    It uses the ``validator`` object from *validate.py*.

    To run ``validate`` on the current ConfigObj, call: ::

        test = config.validate(validator)

    (Normally having previously passed in the configspec when the ConfigObj
    was created - you can dynamically assign a dictionary of checks to the
    ``configspec`` attribute of a section though).

    It returns ``True`` if everything passes, or a dictionary of pass/fails
    (True/False). If every member of a subsection passes, it will just have
    the value ``True``. (It also returns ``False`` if all members fail).

    In addition, it converts the values from strings to their native types
    if their checks pass (and ``stringify`` is set).

    If ``preserve_errors`` is ``True`` (``False`` is default) then instead
    of marking a fail with a ``False``, it will preserve the actual
    exception object. This can contain info about the reason for failure.
    For example the ``VdtValueTooSmallError`` indicates that the value
    supplied was too small.

    If a value (or section) is missing it will still be marked as
    ``False``.

    You must have the validate module to use ``preserve_errors=True``.

    You can then use the ``flatten_errors`` function to turn your nested
    results dictionary into a flattened list of failures - useful for
    displaying meaningful error messages.
    """
    if section is None:
        if self.configspec is None:
            raise ValueError('No configspec supplied.')
        if preserve_errors:
            # We do this once to remove a top level dependency on the validate module
            # Which makes importing configobj faster
            from .validate import VdtMissingValue
            self._vdtMissingValue = VdtMissingValue

        section = self

        if copy:
            section.initial_comment = section.configspec.initial_comment
            section.final_comment = section.configspec.final_comment
            section.encoding = section.configspec.encoding
            section.BOM = section.configspec.BOM
            section.newlines = section.configspec.newlines
            section.indent_type = section.configspec.indent_type

    #
    # section.default_values.clear() #??
    configspec = section.configspec
    self._set_configspec(section, copy)


    def validate_entry(entry, spec, val, missing, ret_true, ret_false):
        section.default_values.pop(entry, None)

        try:
            section.default_values[entry] = validator.get_default_value(configspec[entry])
        except (KeyError, AttributeError, validator.baseErrorClass):
            # No default, bad default or validator has no 'get_default_value'
            # (e.g. SimpleVal)
            pass

        try:
            check = validator.check(spec,
                                    val,
                                    missing=missing
                                    )
        except validator.baseErrorClass as e:
            if not preserve_errors or isinstance(e, self._vdtMissingValue):
                out[entry] = False
            else:
                # preserve the error
                out[entry] = e
                ret_false = False
            ret_true = False
        else:
            ret_false = False
            out[entry] = True
            if self.stringify or missing:
                # if we are doing type conversion
                # or the value is a supplied default
                if not self.stringify:
                    if isinstance(check, (list, tuple)):
                        # preserve lists
                        check = [self._str(item) for item in check]
                    elif missing and check is None:
                        # convert the None from a default to a ''
                        check = ''
                    else:
                        check = self._str(check)
                if (check != val) or missing:
                    section[entry] = check
            if not copy and missing and entry not in section.defaults:
                section.defaults.append(entry)
        return ret_true, ret_false

    #
    out = {}
    ret_true = True
    ret_false = True

    unvalidated = [k for k in section.scalars if k not in configspec]
    incorrect_sections = [k for k in configspec.sections if k in section.scalars]
    incorrect_scalars = [k for k in configspec.scalars if k in section.sections]

    for entry in configspec.scalars:
        if entry in ('__many__', '___many___'):
            # reserved names
            continue
        if (not entry in section.scalars) or (entry in section.defaults):
            # missing entries
            # or entries from defaults
            missing = True
            val = None
            if copy and entry not in section.scalars:
                # copy comments
                section.comments[entry] = (
                    configspec.comments.get(entry, []))
                section.inline_comments[entry] = (
                    configspec.inline_comments.get(entry, ''))
            #
        else:
            missing = False
            val = section[entry]

        ret_true, ret_false = validate_entry(entry, configspec[entry], val,
                                             missing, ret_true, ret_false)

    many = None
    if '__many__' in configspec.scalars:
        many = configspec['__many__']
    elif '___many___' in configspec.scalars:
        many = configspec['___many___']

    if many is not None:
        for entry in unvalidated:
            val = section[entry]
            ret_true, ret_false = validate_entry(entry, many, val, False,
                                                 ret_true, ret_false)
        unvalidated = []

    for entry in incorrect_scalars:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Value %r was provided as a section' % entry
            out[entry] = validator.baseErrorClass(msg)
    for entry in incorrect_sections:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Section %r was provided as a single value' % entry
            out[entry] = validator.baseErrorClass(msg)

    # Missing sections will have been created as empty ones when the
    # configspec was read.
    for entry in section.sections:
        # FIXME: this means DEFAULT is not copied in copy mode
        if section is self and entry == 'DEFAULT':
            continue
        if section[entry].configspec is None:
            unvalidated.append(entry)
            continue
        if copy:
            section.comments[entry] = configspec.comments.get(entry, [])
            section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
        check = self.validate(validator, preserve_errors=preserve_errors,
                              copy=copy, section=section[entry])
        out[entry] = check
        if check == False:
            ret_true = False
        elif check == True:
            ret_false = False
        else:
            ret_true = False

    section.extra_values = unvalidated
    if preserve_errors and not section._created:
        # If the section wasn't created (i.e. it wasn't missing)
        # then we can't return False, we need to preserve errors
        ret_false = False
    #
    if ret_false and preserve_errors and out:
        # If we are preserving errors, but all
        # the failures are from missing sections / values
        # then we can return False. Otherwise there is a
        # real failure that we need to preserve.
        ret_false = not any(out.values())
    if ret_true:
        return True
    elif ret_false:
        return False
    return out
Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages.
entailment
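A hedged end-to-end sketch of ``validate`` with ``preserve_errors`` (assuming configobj and its companion validate module are installed; the 'value' key and its spec are invented). ``flatten_errors`` turns the nested result into a flat list of failures:

from configobj import ConfigObj, flatten_errors
from validate import Validator

spec = ['value = integer(min=0)']
cfg = ConfigObj(['value = -5'], configspec=spec)
result = cfg.validate(Validator(), preserve_errors=True)
if result is not True:
    for sections, key, error in flatten_errors(cfg, result):
        # error is the preserved exception, e.g. VdtValueTooSmallError
        print(sections, key, error)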
def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None
Clear ConfigObj instance and restore to 'freshly created' state.
entailment
def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, string_types): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec)
Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file.
entailment
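A brief sketch of ``reload`` (the file name is a placeholder). It only works when the instance was created from a file name, since ``filename`` must be a string:

from configobj import ConfigObj

cfg = ConfigObj('settings.ini')   # hypothetical config file
# ... 'settings.ini' is modified on disk by something else ...
cfg.reload()                      # re-reads the file with the original options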
def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member
A dummy check method, always returns the value unchanged.
entailment
def _processCommandLineArgs():
    """
    Get the command line arguments

    Parameters: NONE

    Returns:

      files             list of file specifications to be converted

      outputFileNames   list of output file specifications (one per input file)
                        Default: a list of None values (one per input file)

      conversionFormat  string indicating the conversion format requested
                        Default: "multiExtension"

      verbose           flag indicating if verbose output is desired
                        Default: False

    Exceptions: NONE
    """
    import getopt

    try:
        # the long option takes an argument, so it needs a trailing '='
        opts, args = getopt.getopt(sys.argv[1:], "hvmo:",
                                   ["help", "verbose",
                                    "multiExtensionConversion",
                                    "outputFileName="])
    except getopt.GetoptError as e:
        print(str(e))
        _usage()
        sys.exit(1)

    conversionFormat = ""
    outputFileNames = []
    verbose = False

    for o, a in opts:
        if o in ("-h", "--help"):
            _usage()
            print(" Convert the waivered FITS Files (FILEs) to various formats.")
            print(" The default conversion format is multi-extension FITS.")
            print(" Options:")
            print(" -h, --help display this help message and exit")
            print(" -v, --verbose provide verbose output")
            print(" -m, --multiExtensionConversion convert to multiExtension FITS format")
            print(" -o, --outputFileName comma separated list of output file")
            print(" specifications (one per input FILE)")
            sys.exit()

        if o in ("-v", "--verbose"):
            verbose = True

        if o in ("-m", "--multiExtensionConversion"):
            if conversionFormat != "":
                print("convertwaiveredfits.py: only one conversion format allowed")
                _usage()
                sys.exit(1)
            conversionFormat = "multiExtension"

        if o in ("-o", "--outputFileName"):
            outputFileNames = a.split(',')

    if conversionFormat == "":
        #
        # Set the default conversion format if none was provided
        #
        conversionFormat = "multiExtension"

    if not args:
        print("convertwaiveredfits.py: nothing to convert")
        _usage()
        sys.exit(1)
    else:
        files = args

    if outputFileNames:
        if len(files) != len(outputFileNames):
            print("convertwaiveredfits.py: number of output file names does not match")
            print(" the number of FILEs to convert")
            _usage()
            sys.exit(1)
    else:
        for i in range(0,len(files)):
            outputFileNames.append(None)

    return files,outputFileNames,conversionFormat,verbose
Get the command line arguments Parameters: NONE Returns: files list of file specifications to be converted outputFileNames list of output file specifications (one per input file) Default: a list of None values (one per input file) conversionFormat string indicating the conversion format requested Default: "multiExtension" verbose flag indicating if verbose output is desired Default: False Exceptions: NONE
entailment
def _verify(waiveredHdul): """ Verify that the input HDUList is for a waivered FITS file. Parameters: waiveredHdul HDUList object to be verified Returns: None Exceptions: ValueError Input HDUList is not for a waivered FITS file """ if len(waiveredHdul) == 2: # # There must be exactly 2 HDU's # if waiveredHdul[0].header['NAXIS'] > 0: # # The Primary HDU must have some data # if isinstance(waiveredHdul[1], fits.TableHDU): # # The Alternate HDU must be a TableHDU # if waiveredHdul[0].data.shape[0] == \ waiveredHdul[1].data.shape[0] or \ waiveredHdul[1].data.shape[0] == 1: # # The number of arrays in the Primary HDU must match # the number of rows in the TableHDU. This includes # the case where there is only a single array and row. # return # # Not a valid waivered Fits file # raise ValueError("Input object does not represent a valid waivered" + \ " FITS file")
Verify that the input HDUList is for a waivered FITS file. Parameters: waiveredHdul HDUList object to be verified Returns: None Exceptions: ValueError Input HDUList is not for a waivered FITS file
entailment
def toMultiExtensionFits(waiveredObject,
                         multiExtensionFileName=None,
                         forceFileOutput=False,
                         verbose=False):
    """
    Convert the input waivered FITS object to a multi-extension FITS
    HDUList object.  Generate an output multi-extension FITS file if
    requested.

    Parameters:

      waiveredObject         input object representing a waivered FITS
                             file; either an astropy.io.fits.HDUList
                             object, a file object, or a file specification

      multiExtensionFileName file specification for the output file
                             Default: None - do not generate an output file

      forceFileOutput        force the generation of an output file when
                             the multiExtensionFileName parameter is None;
                             the output file specification will be the same
                             as the input file specification with the last
                             character of the base name replaced with the
                             character 'h'.
                             Default: False

      verbose                provide verbose output
                             Default: False

    Returns:

      mhdul                  an HDUList object in multi-extension FITS
                             format.

    Exceptions:

      TypeError              Input object is not a HDUList, a file object
                             or a file name
    """
    if isinstance(waiveredObject, fits.HDUList):
        whdul = waiveredObject
        inputObjectDescription = "HDUList object"
    else:
        try:
            whdul = fits.open(waiveredObject)
            if isinstance(waiveredObject, string_types):
                inputObjectDescription = "file " + waiveredObject
            else:
                inputObjectDescription = "file " + waiveredObject.name
        except TypeError:
            raise TypeError("Input object must be HDUList, file object, " +
                            "or file name")

    _verify(whdul)

    undesiredPrimaryHeaderKeywords = ['ORIGIN','FITSDATE','FILENAME',
                                      'ALLG-MAX','ALLG-MIN','ODATTYPE',
                                      'SDASMGNU','OPSIZE','CTYPE2',
                                      'CD2_2','CD2_1','CD1_2','CTYPE3',
                                      'CD3_3','CD3_1','CD1_3','CD2_3',
                                      'CD3_2']
    #
    # Create the multi-extension primary header as a copy of the
    # waivered file primary header
    #
    mPHeader = whdul[0].header
    originalDataType = whdul[0].header.get('ODATTYPE','')
    #
    # Remove primary header cards with keywords matching the
    # list of undesired primary header keywords
    #
    for keyword in undesiredPrimaryHeaderKeywords:
        #
        # Be careful only to delete the first card that matches
        # the keyword, not all of the cards
        #
        if keyword in mPHeader:
            del mPHeader[mPHeader.index(keyword)]
    #
    # Get the columns from the secondary HDU table
    #
    wcols = whdul[1].columns
    #
    # Remove primary header cards with keywords matching the
    # column names in the secondary HDU table
    #
    for keyword in wcols.names:
        if keyword in mPHeader:
            del mPHeader[keyword]
    #
    # Create the PrimaryHDU
    #
    mPHdu = fits.PrimaryHDU(header=mPHeader)
    #
    # Add the EXTEND card
    #
    mPHdu.header.set('EXTEND', value=True, after='NAXIS')
    #
    # Add the NEXTEND card.  There will be one extension
    # for each row in the waivered FITS file table HDU.
    #
    mPHdu.header['NEXTEND'] = (whdul[1].data.shape[0],
                               'Number of standard extensions')
    #
    # Create the multi-extension file HDUList from the primary header
    #
    mhdul = fits.HDUList([mPHdu])
    #
    # Create the extension HDUs for the multi-extension file.  There
    # will be one extension for each row in the waivered file's table.
    #
    instrument = mPHeader.get('INSTRUME', '')
    nrows = whdul[1].data.shape[0]

    for i in range(0,nrows):
        #
        # Create the basic HDU from the data
        #
        if nrows == 1:
            #
            # Handle case where there is only one row in the table
            #
            data = whdul[0].data
        else:
            data = whdul[0].data[i]
        mhdul.append(fits.ImageHDU(data))
        #
        # Add cards to the header for each keyword in the column
        # names of the secondary HDU table from the waivered file
        #
        for keyword,format,unit in zip(wcols.names,wcols.formats,wcols.units):
            if unit == 'LOGICAL-':
                #
                # Handle logical values
                #
                if whdul[1].data.field(keyword)[i].strip() == 'T':
                    d = True
                else:
                    d = False
            elif format[0] == 'E':
                #
                # Handle floating point values
                #
                fmt = '%'+format[1:]+'G'
                d = eval(fmt % float(whdul[1].data.field(keyword)[i]))
            else:
                d = whdul[1].data.field(keyword)[i]

            kw_descr = ""
            if keyword in whdul[1].header:
                kw_descr = whdul[1].header[keyword]
            mhdul[i+1].header[keyword] = (d, kw_descr)
        #
        # If original data is unsigned short then scale the data.
        #
        if originalDataType == 'USHORT':
            mhdul[i+1].scale('int16','',bscale=1,bzero=32768)
            mhdul[i+1].header.set('BSCALE', value=1, before='BZERO')
        #
        # For WFPC2 and FOC instruments require additional header cards
        #
        if instrument in ('WFPC2','FOC'):
            #
            # Add EXTNAME card to header
            #
            mhdul[i+1].header['EXTNAME'] = (mPHeader.get('FILETYPE',''),
                                            'extension name')
            #
            # Add EXTVER card to the header
            #
            mhdul[i+1]._extver = i+1
            mhdul[i+1].header.set('EXTVER', value=i+1,
                                  comment='extension version number',
                                  after='EXTNAME')
            #
            # Add the EXPNAME card to the header
            #
            mhdul[i+1].header.set('EXPNAME', mPHeader.get('ROOTNAME', ''),
                                  '9 character exposure identifier',
                                  before='EXTVER')
            #
            # Add the INHERIT card to the header.
            #
            mhdul[i+1].header.set('INHERIT', True,
                                  'inherit the primary header',
                                  after='EXTVER')
            #
            # Add the ROOTNAME card to the header
            #
            mhdul[i+1].header.set('ROOTNAME', mPHeader.get('ROOTNAME', ''),
                                  'rootname of the observationset',
                                  after='INHERIT')

    if not multiExtensionFileName and forceFileOutput:
        base,ext = os.path.splitext(whdul[0]._file.name)
        multiExtensionFileName = base[:-1]+'h'+ext

    verboseString = "Input " + inputObjectDescription + \
                    " converted to multi-extension FITS format."

    if multiExtensionFileName:
        if instrument in ('WFPC2','FOC'):
            #
            # write the FILENAME card to the header for the WFPC2 and FOC
            # instruments
            #
            head,tail = os.path.split(multiExtensionFileName)
            mhdul[0].header.set('FILENAME', value=tail, after='NEXTEND')
        if ASTROPY_VER_GE13:
            mhdul.writeto(multiExtensionFileName, overwrite=True)
        else:
            mhdul.writeto(multiExtensionFileName, clobber=True)
        verboseString = verboseString[:-1] + " and written to " + \
                        multiExtensionFileName + "."

    if verbose:
        print(verboseString)

    return mhdul
Convert the input waivered FITS object to a multi-extension FITS HDUList object. Generate an output multi-extension FITS file if requested. Parameters: waiveredObject input object representing a waivered FITS file; either a astroyp.io.fits.HDUList object, a file object, or a file specification outputFileName file specification for the output file Default: None - do not generate an output file forceFileOutput force the generation of an output file when the outputFileName parameter is None; the output file specification will be the same as the input file specification with the last character of the base name replaced with the character 'h'. Default: False verbose provide verbose output Default: False Returns: mhdul an HDUList object in multi-extension FITS format. Exceptions: TypeError Input object is not a HDUList, a file object or a file name
entailment
def convertwaiveredfits(waiveredObject,
                        outputFileName=None,
                        forceFileOutput=False,
                        convertTo='multiExtension',
                        verbose=False):
    """
    Convert the input waivered FITS object to various formats.  The
    default conversion format is multi-extension FITS.  Generate an
    output file in the desired format if requested.

    Parameters:

      waiveredObject  input object representing a waivered FITS file;
                      either an astropy.io.fits.HDUList object, a file
                      object, or a file specification

      outputFileName  file specification for the output file
                      Default: None - do not generate an output file

      forceFileOutput force the generation of an output file when the
                      outputFileName parameter is None; the output file
                      specification will be the same as the input file
                      specification with the last character of the base
                      name replaced with the character `h` in
                      multi-extension FITS format.
                      Default: False

      convertTo       target conversion type
                      Default: 'multiExtension'

      verbose         provide verbose output
                      Default: False

    Returns:

      hdul            an HDUList object in the requested format.

    Exceptions:

      ValueError      Conversion type is unknown
    """
    if convertTo == 'multiExtension':
        func = toMultiExtensionFits
    else:
        raise ValueError('Conversion type ' + convertTo + ' unknown')
    return func(*(waiveredObject,outputFileName,forceFileOutput,verbose))
Convert the input waivered FITS object to various formats. The default conversion format is multi-extension FITS. Generate an output file in the desired format if requested. Parameters: waiveredObject input object representing a waivered FITS file; either an astropy.io.fits.HDUList object, a file object, or a file specification outputFileName file specification for the output file Default: None - do not generate an output file forceFileOutput force the generation of an output file when the outputFileName parameter is None; the output file specification will be the same as the input file specification with the last character of the base name replaced with the character `h` in multi-extension FITS format. Default: False convertTo target conversion type Default: 'multiExtension' verbose provide verbose output Default: False Returns: hdul an HDUList object in the requested format. Exceptions: ValueError Conversion type is unknown
entailment
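An illustrative call of the converter (file names are placeholders; the module is assumed to be importable from stsci.tools). Both in-memory conversion and forced file output are shown:

from astropy.io import fits
from stsci.tools import convertwaiveredfits

# convert an already-open HDUList in memory
hdul = fits.open('u5780205r_c0f.fits')
mhdul = convertwaiveredfits.convertwaiveredfits(hdul)

# convert by file name and also write u5780205r_c0h.fits
convertwaiveredfits.convertwaiveredfits('u5780205r_c0f.fits',
                                        forceFileOutput=True)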
def to_jd(year, month, day): '''Determine Julian day from Persian date''' if year >= 0: y = 474 else: y = 473 epbase = year - y epyear = 474 + (epbase % 2820) if month <= 7: m = (month - 1) * 31 else: m = (month - 1) * 30 + 6 return day + m + trunc(((epyear * 682) - 110) / 2816) + (epyear - 1) * 365 + trunc(epbase / 2820) * 1029983 + (EPOCH - 1)
Determine Julian day from Persian date
entailment
def from_jd(jd): '''Calculate Persian date from Julian day''' jd = trunc(jd) + 0.5 depoch = jd - to_jd(475, 1, 1) cycle = trunc(depoch / 1029983) cyear = (depoch % 1029983) if cyear == 1029982: ycycle = 2820 else: aux1 = trunc(cyear / 366) aux2 = cyear % 366 ycycle = trunc(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1 year = ycycle + (2820 * cycle) + 474 if (year <= 0): year -= 1 yday = (jd - to_jd(year, 1, 1)) + 1 if yday <= 186: month = ceil(yday / 31) else: month = ceil((yday - 6) / 30) day = int(jd - to_jd(year, month, 1)) + 1 return (year, month, day)
Calculate Persian date from Julian day
entailment
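A quick round-trip check for the two Persian-calendar helpers above (assuming the module-level EPOCH constant and the trunc/ceil helpers they rely on are in scope):

# 1 Farvardin 1403 and back again
jd = to_jd(1403, 1, 1)
assert from_jd(jd) == (1403, 1, 1)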
def setup_global_logging(): """ Initializes capture of stdout/stderr, Python warnings, and exceptions; redirecting them to the loggers for the modules from which they originated. """ global global_logging_started if not PY3K: sys.exc_clear() if global_logging_started: return orig_logger_class = logging.getLoggerClass() logging.setLoggerClass(StreamTeeLogger) try: stdout_logger = logging.getLogger(__name__ + '.stdout') stderr_logger = logging.getLogger(__name__ + '.stderr') finally: logging.setLoggerClass(orig_logger_class) stdout_logger.setLevel(logging.INFO) stderr_logger.setLevel(logging.ERROR) stdout_logger.set_stream(sys.stdout) stderr_logger.set_stream(sys.stderr) sys.stdout = stdout_logger sys.stderr = stderr_logger exception_logger = logging.getLogger(__name__ + '.exc') sys.excepthook = LoggingExceptionHook(exception_logger) logging.captureWarnings(True) rawinput = 'input' if PY3K else 'raw_input' builtins._original_raw_input = getattr(builtins, rawinput) setattr(builtins, rawinput, global_logging_raw_input) global_logging_started = True
Initializes capture of stdout/stderr, Python warnings, and exceptions; redirecting them to the loggers for the modules from which they originated.
entailment
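A hedged sketch of turning the global capture on and off (the log file name is illustrative). After setup, ordinary print calls are routed through the module's stdout logger and can be handled like any other log record:

import logging

logging.basicConfig(level=logging.INFO, filename='capture.log')
setup_global_logging()
print('hello')               # captured, logged, and echoed to the real stdout
teardown_global_logging()    # restores sys.stdout / sys.stderr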
def teardown_global_logging(): """Disable global logging of stdio, warnings, and exceptions.""" global global_logging_started if not global_logging_started: return stdout_logger = logging.getLogger(__name__ + '.stdout') stderr_logger = logging.getLogger(__name__ + '.stderr') if sys.stdout is stdout_logger: sys.stdout = sys.stdout.stream if sys.stderr is stderr_logger: sys.stderr = sys.stderr.stream # If we still have an unhandled exception go ahead and handle it with the # replacement excepthook before deleting it exc_type, exc_value, exc_traceback = sys.exc_info() if exc_type is not None: sys.excepthook(exc_type, exc_value, exc_traceback) del exc_type del exc_value del exc_traceback if not PY3K: sys.exc_clear() del sys.excepthook logging.captureWarnings(False) rawinput = 'input' if PY3K else 'raw_input' if hasattr(builtins, '_original_raw_input'): setattr(builtins, rawinput, builtins._original_raw_input) del builtins._original_raw_input global_logging_started = False
Disable global logging of stdio, warnings, and exceptions.
entailment
def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,
                  stream=None, level=logging.INFO, filename=None, filemode='w',
                  filelevel=None, propagate=True):
    """
    Do basic configuration for the logging system. Similar to
    logging.basicConfig but the logger ``name`` is configurable and both a
    file output and a stream output can be created. Returns a logger object.

    The default behaviour is to create a logger called ``name`` with a null
    handler, and to use the "%(levelname)s: %(message)s" format string, and
    add the handler to the ``name`` logger.

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    :param name: Logger name
    :param format: handler format string
    :param datefmt: handler date/time format specifier
    :param stream: add a StreamHandler using ``stream``
                   (None disables the stream, default=None)
    :param level: logger level (default=INFO).
    :param filename: add a FileHandler using ``filename`` (default=None)
    :param filemode: open ``filename`` with specified filemode ('w' or 'a')
    :param filelevel: logger level for file logger (default=``level``)
    :param propagate: propagate message to parent (default=True)

    :returns: logging.Logger object
    """
    # Get a logger for the specified name
    logger = logging.getLogger(name)
    logger.setLevel(level)
    fmt = logging.Formatter(format, datefmt)
    logger.propagate = propagate

    # Remove existing handlers, otherwise multiple handlers can accrue
    for hdlr in logger.handlers:
        logger.removeHandler(hdlr)

    # Add handlers. Add NullHandler if no file or stream output so that
    # modules don't emit a warning about no handler.
    if not (filename or stream):
        logger.addHandler(logging.NullHandler())

    if filename:
        hdlr = logging.FileHandler(filename, filemode)
        if filelevel is None:
            filelevel = level
        hdlr.setLevel(filelevel)
        hdlr.setFormatter(fmt)
        logger.addHandler(hdlr)

    if stream:
        hdlr = logging.StreamHandler(stream)
        hdlr.setLevel(level)
        hdlr.setFormatter(fmt)
        logger.addHandler(hdlr)

    return logger
Do basic configuration for the logging system. Similar to logging.basicConfig but the logger ``name`` is configurable and both a file output and a stream output can be created. Returns a logger object. The default behaviour is to create a logger called ``name`` with a null handler, and to use the "%(levelname)s: %(message)s" format string, and add the handler to the ``name`` logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. :param name: Logger name :param format: handler format string :param datefmt: handler date/time format specifier :param stream: add a StreamHandler using ``stream`` (None disables the stream, default=None) :param level: logger level (default=INFO). :param filename: add a FileHandler using ``filename`` (default=None) :param filemode: open ``filename`` with specified filemode ('w' or 'a') :param filelevel: logger level for file logger (default=``level``) :param propagate: propagate message to parent (default=True) :returns: logging.Logger object
entailment
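A short usage sketch for ``create_logger`` (the logger name and file name are made up):

import logging
import sys

log = create_logger('myapp', stream=sys.stderr, filename='myapp.log',
                    level=logging.INFO)
log.info('appears on stderr and in myapp.log')
log.debug('suppressed: below the INFO logger level')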
def set_stream(self, stream): """ Set the stream that this logger is meant to replace. Usually this will be either `sys.stdout` or `sys.stderr`, but can be any object with `write()` and `flush()` methods, as supported by `logging.StreamHandler`. """ for handler in self.handlers[:]: if isinstance(handler, logging.StreamHandler): self.handlers.remove(handler) if stream is not None: stream_handler = logging.StreamHandler(stream) stream_handler.addFilter(_StreamHandlerEchoFilter()) stream_handler.setFormatter(logging.Formatter('%(message)s')) self.addHandler(stream_handler) self.stream = stream
Set the stream that this logger is meant to replace. Usually this will be either `sys.stdout` or `sys.stderr`, but can be any object with `write()` and `flush()` methods, as supported by `logging.StreamHandler`.
entailment
def write(self, message): """ Buffers each message until a newline is reached. Each complete line is then published to the logging system through ``self.log()``. """ self.__thread_local_ctx.write_count += 1 try: if self.__thread_local_ctx.write_count > 1: return # For each line in the buffer ending with \n, output that line to # the logger msgs = (self.buffer + message).split('\n') self.buffer = msgs.pop(-1) for m in msgs: self.log_orig(m, echo=True) finally: self.__thread_local_ctx.write_count -= 1
Buffers each message until a newline is reached. Each complete line is then published to the logging system through ``self.log()``.
entailment
def find_actual_caller(self):
    """
    Returns the fully-qualified module name, full pathname, line number,
    and function in which `StreamTeeLogger.write()` was called.  For
    example, if this instance is used to replace `sys.stdout`, this will
    return the location of any print statement.
    """
    # Gleaned from code in the logging module itself...
    try:
        f = sys._getframe(1)
        ##f = inspect.currentframe(1)
    except Exception:
        f = None
    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        mod = inspect.getmodule(f)
        if mod is None:
            modname = '__main__'
        else:
            modname = mod.__name__
        if modname == __name__:
            # Crawl back until the first frame outside of this module
            f = f.f_back
            continue
        rv = (modname, filename, f.f_lineno, co.co_name)
        break
    return rv
Returns the fully-qualified module name, full pathname, line number, and function in which `StreamTeeLogger.write()` was called. For example, if this instance is used to replace `sys.stdout`, this will return the location of any print statement.
entailment
def load(fp):
    '''
    Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing an XPORT document) to a Python object.
    '''
    reader = reading.Reader(fp)
    keys = reader.fields
    columns = {k: [] for k in keys}
    for row in reader:
        for key, value in zip(keys, row):
            columns[key].append(value)
    return columns
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing an XPORT document) to a Python object.
entailment
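A minimal sketch of reading a SAS transport file with ``load`` (the path is a placeholder). The result maps each field name to a list of column values:

with open('example.xpt', 'rb') as fp:
    columns = load(fp)
for name, values in columns.items():
    print(name, values[:5])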