_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q40600 | DataSet._manifest | train | def _manifest(self):
"""Return manifest content."""
if self._manifest_cache is None:
self._manifest_cache = self._storage_broker.get_manifest()
return self._manifest_cache | python | {
"resource": ""
} |
def _identifiers(self):
    """Yield the identifier of every item held by the storage broker."""
    for item_handle in self._storage_broker.iter_item_handles():
        # Identifiers are derived deterministically from the item handle.
        yield dtoolcore.utils.generate_identifier(item_handle)
def create(self):
    """Initialise the dataset: build the directory layout first, then
    persist the administrative metadata through the storage broker."""
    broker = self._storage_broker
    broker.create_structure()
    broker.put_admin_metadata(self._admin_metadata)
def _generate_overlays(self):
    """Build overlay dictionaries from the metadata stored per item.

    Returns a dict mapping overlay name -> {item identifier: value}.
    """
    overlays = defaultdict(dict)
    for handle in self._storage_broker.iter_item_handles():
        identifier = dtoolcore.utils.generate_identifier(handle)
        item_metadata = self._storage_broker.get_item_metadata(handle)
        for name, value in item_metadata.items():
            overlays[name][identifier] = value
    return overlays
def add_papyrus_handler(self, route_name_prefix, base_url, handler):
    """ Add a Papyrus handler, i.e. a handler defining the MapFish
    HTTP interface.

    Example::

        import papyrus
        config.include(papyrus)
        config.add_papyrus_handler(
            'spots', '/spots', 'mypackage.handlers.SpotHandler')

    Arguments:

    ``route_name_prefix`` The prefix used for the route names
    passed to ``config.add_handler``.

    ``base_url`` The web service's base URL, e.g. ``/spots``. No
    trailing slash!

    ``handler`` a dotted name or a reference to a handler class,
    e.g. ``'mypackage.handlers.MyHandler'``.
    """
    # (route suffix, URL suffix, action, HTTP method) for each MapFish
    # route.  Registration order is preserved from the original
    # hand-written sequence, because route matching order matters.
    routes = (
        ('_read_many', '', 'read_many', 'GET'),
        ('_read_one', '/{id}', 'read_one', 'GET'),
        ('_count', '/count', 'count', 'GET'),
        ('_create', '', 'create', 'POST'),
        ('_update', '/{id}', 'update', 'PUT'),
        ('_delete', '/{id}', 'delete', 'DELETE'),
    )
    for name_suffix, url_suffix, action, method in routes:
        self.add_handler(route_name_prefix + name_suffix,
                         base_url + url_suffix, handler,
                         action=action, request_method=method)
def add_papyrus_routes(self, route_name_prefix, base_url):
    """ A helper method that adds routes to view callables that, together,
    implement the MapFish HTTP interface.

    Example::

        import papyrus
        config.include(papyrus)
        config.add_papyrus_routes('spots', '/spots')
        config.scan()

    Arguments:

    ``route_name_prefix`` The prefix used for the route names
    passed to ``config.add_route``.

    ``base_url`` The web service's base URL, e.g. ``/spots``. No
    trailing slash!
    """
    # (route suffix, URL suffix, HTTP method); registration order kept
    # identical to the original hand-written sequence.
    routes = (
        ('_read_many', '', 'GET'),
        ('_read_one', '/{id}', 'GET'),
        ('_count', '/count', 'GET'),
        ('_create', '', 'POST'),
        ('_update', '/{id}', 'PUT'),
        ('_delete', '/{id}', 'DELETE'),
    )
    for name_suffix, url_suffix, method in routes:
        self.add_route(route_name_prefix + name_suffix,
                       base_url + url_suffix, request_method=method)
def peak_model(f):
    """Decorate a peak-shape function with scale and location handling.

    The wrapped function accepts keyword arguments:
    ``v`` vertical offset, ``h`` height, ``x`` horizontal offset
    (1st moment), ``w`` width (2nd moment), ``s`` skewness (3rd moment),
    ``e`` excess (4th moment).  Missing ones are filled with defaults,
    and the time axis is shifted/scaled for peak functions that do not
    take ``x``/``w`` themselves.
    """
    @wraps(f)
    def wrapped_f(t, **kw):
        # load kwargs with default values
        # do this here instead of in the def because we want to parse
        # all of kwargs later to copy values to pass into f
        def_vals = {'v': 0.0, 'h': 1.0, 'x': 0.0, 'w': 1.0, 's': 1.1, 'e': 1.0}
        for v in def_vals:
            if v not in kw:
                kw[v] = def_vals[v]
        # this copies all of the defaults into what the peak function needs
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # inspect.getfullargspec is the drop-in replacement -- confirm the
        # supported Python versions before changing.
        anames, _, _, _ = inspect.getargspec(f)
        fkw = dict([(arg, kw[arg]) for arg in anames if arg in kw])
        # some functions use location or width parameters explicitly
        # if not, adjust the timeseries accordingly
        ta = t
        if 'x' not in anames:
            ta = ta - kw['x']
        if 'w' not in anames:
            ta = ta / kw['w']
        # finally call the function
        mod = f(ta, **fkw)
        # recalcualte, making the peak maximize at x
        # NOTE(review): shifts the axis by the location of the first
        # evaluation's maximum, presumably to centre the peak at ``x`` --
        # confirm the sign of the shift against the peak shapes used.
        mod = f(ta + ta[mod.argmax()], **fkw)
        # normalise to height h on top of vertical offset v
        return kw['v'] + kw['h'] / max(mod) * mod
    # advertise which keyword arguments the wrapped model understands
    args = set(['v', 'h', 'x', 'w'])
    anames, _, _, _ = inspect.getargspec(f)
    wrapped_f._peakargs = list(args.union([a for a in anames
                                           if a not in ('t', 'r')]))
    return wrapped_f
q40607 | _findNearest | train | def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = (abs(arr-value)).argmin()
return arr[idx] | python | {
"resource": ""
} |
def _createMagConversionDict():
    """Load ``data/magnitude_conversion.dat`` into a dict keyed on
    spectral class.

    The data file is a table from 1995ApJS..101..117K -- the original
    docstring said "table A%", presumably a garbled table number; TODO
    confirm the exact reference.
    """
    magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat')
    # '|S5' loads every column as 5-byte strings
    raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5')
    magDict = {}
    for row in raw_table:
        # On Python 3, np.loadtxt with '|S5' yields bytes; decode so the
        # keys and values are str.
        if sys.hexversion >= 0x03000000:
            starClass = row[1].decode("utf-8")  # otherwise we get byte ints or b' caused by 2to3
            tableData = [x.decode("utf-8") for x in row[3:]]
        else:
            starClass = row[1]
            tableData = row[3:]
        magDict[starClass] = tableData
    return magDict
def _getParentClass(self, startClass, parentClass):
    """Walk up the ``.parent`` chain from ``startClass`` until an object
    whose ``classType`` matches ``parentClass`` is found.

    :param startClass: object to start the search from (may be falsy/None)
    :param parentClass: the ``classType`` string to look for
    :raises HierarchyError: when the chain ends without a match
    """
    try:
        if not startClass:  # reached system with no hits
            raise AttributeError
    except AttributeError:  # i.e calling binary on an object without one
        raise HierarchyError('This object ({0}) has no {1} as a parent object'.format(self.name, parentClass))
    if startClass.classType == parentClass:
        return startClass
    else:
        # Not this level: recurse one step up the hierarchy.
        return self._getParentClass(startClass.parent, parentClass)
def d(self):
    """Distance to the star, propagated from the parent system.

    Note this should work from child parents as .d propagates; when the
    value is missing and estimation is enabled, falls back to the star
    estimation methods estimateDistance / estimateAbsoluteMagnitude.
    """
    # TODO this will only work from a star or below. good thing?
    d = self.parent.d
    if ed_params.estimateMissingValues:
        # NOTE(review): ``d is np.nan`` is an *identity* test -- it only
        # matches the np.nan singleton, not every NaN value (e.g. results
        # of float arithmetic); confirm whether math.isnan semantics were
        # intended here.
        if d is np.nan:
            d = self.estimateDistance()
            if d is not np.nan:
                self.flags.addFlag('Estimated Distance')
        return d
    else:
        return np.nan
def getLimbdarkeningCoeff(self, wavelength=1.22):  # TODO replace with pylightcurve
    """ Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity.

    :param wavelength: microns
    :type wavelength: float

    :return: limb darkening coefficients 1 and 2
    """
    # TODO check this returns correct value - im not certain
    # The intervals of values in the tables
    tempind = [ 3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500., 5750., 6000., 6250.,
                6500., 6750., 7000., 7250., 7500., 7750., 8000., 8250., 8500., 8750., 9000., 9250.,
                9500., 9750., 10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750., 12000., 12250.,
                12500., 12750., 13000., 14000., 15000., 16000., 17000., 19000., 20000., 21000., 22000., 23000.,
                24000., 25000., 26000., 27000., 28000., 29000., 30000., 31000., 32000., 33000., 34000., 35000.,
                36000., 37000., 38000., 39000., 40000., 41000., 42000., 43000., 44000., 45000., 46000., 47000.,
                48000., 49000., 50000.]
    lggind = [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5.]
    mhind = [-5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, -0.3, -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.]

    # Choose the values in the table nearest our parameters
    tempselect = _findNearest(tempind, float(self.T))
    lgselect = _findNearest(lggind, float(self.calcLogg()))
    mhselect = _findNearest(mhind, float(self.Z))

    quadratic_filepath = resource_stream(__name__, 'data/quadratic.dat')
    coeffTable = np.loadtxt(quadratic_filepath)

    foundValues = False
    for i in range(len(coeffTable)):
        # Columns 2/3/4 hold logg / Teff / [M/H]; column 0 presumably
        # tags the row as the u1 row, with u2 on the following row --
        # TODO confirm against the quadratic.dat format.
        if coeffTable[i, 2] == lgselect and coeffTable[i, 3] == tempselect and coeffTable[i, 4] == mhselect:
            if coeffTable[i, 0] == 1:
                u1array = coeffTable[i, 8:]  # Limb darkening parameter u1 for each wl in waveind
                u2array = coeffTable[i+1, 8:]
                foundValues = True
                break

    if not foundValues:
        raise ValueError('No limb darkening values could be found')  # TODO replace with better exception

    waveind = [0.365, 0.445, 0.551, 0.658, 0.806, 1.22, 1.63, 2.19, 3.45]  # Wavelengths available in table

    # Interpolates the value at wavelength from values in the table (waveind)
    # NOTE: left=0/right=0 means wavelengths outside the table range
    # silently yield zero coefficients.
    u1AtWavelength = np.interp(wavelength, waveind, u1array, left=0, right=0)
    u2AtWavelength = np.interp(wavelength, waveind, u2array, left=0, right=0)

    return u1AtWavelength, u2AtWavelength
def calcTemperature(self):
    """Estimate the planet mean temperature.

    Uses equations.MeanPlanetTemp with the albedo assumption and the
    host star's temperature and radius.

    issues
    - you cant get the albedo assumption without temp but you need it to
      calculate the temp.
    """
    try:
        return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
    except (ValueError, HierarchyError):  # ie missing value (.a) returning nan
        return np.nan
def calcSMA(self):
    """Calculate the semi-major axis from Keplers Third Law, using the
    host star mass and the orbital period."""
    try:
        return eq.KeplersThirdLaw(None, self.star.M, self.P).a
    except HierarchyError:  # no host star in the hierarchy -> cannot compute
        return np.nan
def calcSMAfromT(self, epsilon=0.7):
    """Calculate the semi-major axis based on planet temperature.

    :param epsilon: forwarded straight to eq.MeanPlanetTemp -- presumably
        an emissivity assumption; TODO confirm its meaning there.
    """
    # NOTE(review): here albedo is *called* (self.albedo()) while
    # calcTemperature reads it as an attribute (self.albedo); one of the
    # two is presumably wrong -- verify against the Planet class.
    return eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R, epsilon, self.T).a
def calcPeriod(self):
    """Calculate the orbital period from the semi-major axis and the
    stellar mass via Keplers Third Law."""
    return eq.KeplersThirdLaw(self.a, self.star.M).P
def addParam(self, key, value, attrib=None):
    """Add a parameter to ``self.params``, rejecting blacklisted tags and
    duplicates.

    Alternate names are collected in ``params['altnames']``; ``list``
    values are appended.  Known keys are converted to default units.
    Returns False on rejection, None otherwise.

    Future
    - format input and add units
    - logging
    """
    if key in self.rejectTags:
        return False  # TODO Replace with exception

    # Temporary code to handle the seperation tag than can occur several times with different units.
    # TODO code a full multi unit solution (github issue #1)
    if key == 'separation':
        if attrib is None:
            return False  # reject seperations without a unit
        try:
            if not attrib['unit'] == 'AU':
                return False  # reject for now
        except KeyError:  # a seperation attribute exists but not one for units
            return False

    if key in self.params:  # if already exists
        if key == 'name':
            try:  # if flagged as a primary or popular name use this one, an option should be made to use either
                if attrib['type'] == 'pri':  # first names or popular names.
                    oldname = self.params['name']
                    self.params['altnames'].append(oldname)
                    self.params['name'] = value
                else:
                    self.params['altnames'].append(value)
            except (KeyError, TypeError):  # KeyError = no type key in attrib dict, TypeError = not a dict
                self.params['altnames'].append(value)
        elif key == 'list':
            self.params['list'].append(value)
        else:
            try:
                name = self.params['name']
            except KeyError:
                name = 'Unnamed'
            print('rejected duplicate {0}: {1} in {2}'.format(key, value, name))  # TODO: log rejected value
            return False  # TODO Replace with exception
    else:  # If the key doesn't already exist and isn't rejected
        # Some tags have no value but a upperlimit in the attributes
        if value is None and attrib is not None:
            try:
                value = attrib['upperlimit']
            except KeyError:
                try:
                    value = attrib['lowerlimit']
                except KeyError:
                    return False

        if key == 'rightascension':
            value = _ra_string_to_unit(value)
        elif key == 'declination':
            value = _dec_string_to_unit(value)
        elif key in self._defaultUnits:
            try:
                value = float(value) * self._defaultUnits[key]
            # BUGFIX: was a bare ``except`` which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion errors are
            # expected here.
            except (TypeError, ValueError):
                print('caught an error with {0} - {1}'.format(key, value))
        self.params[key] = value
def roundedSpecClass(self):
    """Spectral class with the class number rounded, e.g. A8.4V -> A8."""
    try:
        rounded = int(np.around(self.classNumber))
    except TypeError:
        # Non-numeric class number (e.g. the empty string) -- use as-is.
        return self.classLetter + str(self.classNumber)
    return self.classLetter + str(rounded)
def _parseSpecType(self, classString):
    """Parse a spectral type string (e.g. ``A8.5V``) into its components.

    Sets ``self.classLetter``, ``self.classNumber`` and ``self.lumType``
    and returns True on success, False when the string cannot be parsed.
    It should probably use more advanced matching with regex.
    """
    try:
        classString = str(classString)
    except UnicodeEncodeError:
        # This is for the benefit of 1RXS1609 which currently has the spectral type K7\pm 1V
        # TODO add unicode support and handling for this case / ammend the target
        return False

    # some initial cases
    if classString == '' or classString == 'nan':
        return False

    possNumbers = range(10)
    possLType = ('III', 'II', 'Iab', 'Ia0', 'Ia', 'Ib', 'IV', 'V')  # in order of unique matches

    # remove spaces, remove slashes
    classString = classString.replace(' ', '')
    classString = classString.replace('-', '/')
    classString = classString.replace('\\', '/')
    classString = classString.split('/')[0]  # TODO we do not consider slashed classes yet (intemediates)

    # check first 3 chars for spectral types
    stellarClass = classString[:3]
    if stellarClass in _possSpectralClasses:
        self.classLetter = stellarClass
    elif stellarClass[:2] in _possSpectralClasses:  # needed because A5V wouldnt match before
        self.classLetter = stellarClass[:2]
    elif stellarClass[0] in _possSpectralClasses:
        self.classLetter = stellarClass[0]
    else:
        return False  # assume a non standard class and fail

    # get number
    try:
        numIndex = len(self.classLetter)
        classNum = int(classString[numIndex])
        if classNum in possNumbers:
            self.classNumber = int(classNum)  # don't consider decimals here, done at the type check
            typeString = classString[numIndex+1:]
        else:
            return False  # invalid number received
    except IndexError:  # reached the end of the string
        return True
    except ValueError:  # i.e its a letter - fail # TODO multi letter checking
        typeString = classString[1:]

    if typeString == '':  # ie there is no more information as in 'A8'
        return True

    # Now check for a decimal and handle those cases
    if typeString[0] == '.':
        # handle decimal cases, we check each number in turn, add them as strings and then convert to float and add
        # to original number
        decimalNumbers = '.'
        for number in typeString[1:]:
            try:
                if int(number) in possNumbers:
                    decimalNumbers += number
                else:
                    print('Something went wrong in decimal checking')  # TODO replace with logging
                    return False  # somethings gone wrong
            except ValueError:
                break  # recevied a non-number (probably L class)

        # add decimal to classNum
        try:
            self.classNumber += float(decimalNumbers)
        except ValueError:  # probably trying to convert '.' to a float
            pass

        typeString = typeString[len(decimalNumbers):]
        # BUGFIX: was ``len(typeString) is 0`` -- identity comparison on an
        # int relies on CPython small-int caching and is a SyntaxWarning on
        # modern Python; use equality.
        if len(typeString) == 0:
            return True

    # Handle luminosity class
    for possL in possLType:  # match each possible case in turn (in order of uniqueness)
        Lcase = typeString[:len(possL)]  # match from front with length to minimise matching say IV in '<3 CIV'
        if possL == Lcase:
            self.lumType = possL
            return True

    if not self.classNumber == '':
        return True
    else:  # if there no number asumme we have a name ie 'Catac. var.'
        self.classLetter = ''
        self.classNumber = ''
        self.lumType = ''
        return False
def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
    """ Converts from or to V mag using the conversion tables

    :param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
    :param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
    :param fromVMag: MagV if from_mag is 'V'
    :raises ValueError: when the spectral class is missing, the star is
        not main sequence, the table has no data for this conversion, or
        the source magnitude is unset.
    :return: estimated magnitude for to_mag from from_mag
    """
    lumtype = self.spectral_type.lumType
    # rounds decimal types, TODO perhaps we should interpolate?
    specClass = self.spectral_type.roundedSpecClass
    if not specClass:  # TODO investigate implications of this
        raise ValueError('Can not convert when no spectral class is given')
    if lumtype not in ('V', ''):
        raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))

    if to_mag == 'V':
        # column_for_V_conversion maps the magnitude letter to a table
        # column plus the sign of the offset.
        col, sign = self.column_for_V_conversion[from_mag]
        try:  # TODO replace with pandas table
            offset = float(magDict[specClass][col])
        except KeyError:
            raise ValueError('No data available to convert those magnitudes for that spectral type')
        if math.isnan(offset):
            raise ValueError('No data available to convert those magnitudes for that spectral type')
        else:
            from_mag_val = self.__dict__['mag'+from_mag]  # safer than eval
            if isNanOrNone(from_mag_val):
                # logger.debug('2 '+from_mag)
                raise ValueError('You cannot convert from a magnitude you have not specified in class')
            return from_mag_val + (offset*sign)
    elif from_mag == 'V':
        if fromVMag is None:
            # trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While
            # this would probably be preferable it is not was was asked and therefore could give unexpected results
            raise ValueError('Must give fromVMag, even if it is self.magV')
        col, sign = self.column_for_V_conversion[to_mag]
        try:
            offset = float(magDict[specClass][col])
        except KeyError:
            raise ValueError('No data available to convert those magnitudes for that spectral type')
        if math.isnan(offset):
            raise ValueError('No data available to convert those magnitudes for that spectral type')
        else:
            return fromVMag + (offset*sign*-1)  # -1 as we are now converting the other way
    else:
        raise ValueError('Can only convert from and to V magnitude. Use .convert() instead')
q40620 | AgilentMWD2._get_str | train | def _get_str(self, f, off):
"""
Convenience function to quickly pull out strings.
"""
f.seek(off)
return f.read(2 * struct.unpack('>B', f.read(1))[0]).decode('utf-16') | python | {
"resource": ""
} |
def delta13c_constants():
    """
    Constants for calculating delta13C values from ratios.
    From website of Verkouteren & Lee 2001 Anal. Chem.
    """
    # (name, S13, S18, K, A) for each published constant set (from NIST).
    rows = (
        ('Craig', 0.0112372, 0.002079, 0.008333, 0.5),
        ('IAEA', 0.0112372, 0.00206716068, 0.0091993, 0.5),
        ('Werner', 0.0112372, 0.0020052, 0.0093704, 0.516),
        ('Santrock', 0.0112372, 0.0020052, 0.0099235, 0.516),
        ('Assonov', 0.0112372, 0.0020052, 0.0102819162, 0.528),
        ('Assonov2', 0.0111802, 0.0020052, 0.0102819162, 0.528),
        ('Isodat', 0.0111802, 0.0020052, 0.0099235, 0.516),
    )
    cst = OrderedDict()
    for name, s13, s18, k, a in rows:
        cst[name] = {'S13': s13, 'S18': s18, 'K': k, 'A': a}
    return cst
def delta13c_craig(r45sam, r46sam, d13cstd, r45std, r46std,
                   ks='Craig', d18ostd=23.5):
    """
    Algorithm from Craig 1957.

    From the original Craig paper, we can set up a pair of equations
    and solve for d13C and d18O simultaneously:

        d45 * r45 = r13 * d13 + 0.5 * r17 * d18
        d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
              + 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18

    where a = r18 + r13 * r17 and b = 1 + r13 + r17

    :param ks: name of the constant set from delta13c_constants to use
    :param d18ostd: assumed delta-18O of the standard
    """
    # the constants for the calculations
    # originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5
    k = delta13c_constants()[ks]
    # TODO: not clear why need to multiply by 2?
    r13, r18 = k['S13'], 2 * k['S18']
    r17 = 2 * (k['K'] * k['S18'] ** k['A'])
    a = (r18 + r13 * r17) * (1. + r13 + r17)

    # the coefficients of the 2x2 linear system described above
    eqn_mat = np.array([[r13, 0.5 * r17],
                        [r13 * ((r17 ** 2 + r17 - r18) / a),
                         1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)]])

    # precalculate the d45 and d46 of the standard versus PDB
    r45d45std = (eqn_mat[0, 0] * d13cstd + eqn_mat[0, 1] * d18ostd)
    d46std = eqn_mat[1, 0] * d13cstd + eqn_mat[1, 1] * d18ostd

    # calculate the d45 and d46 of our sample versus PDB
    # in r45d45, r45 of PDB = r13 + r17 of PDB
    r45d45 = 1000. * (r45sam / r45std - 1.) * \
        (r13 + r17 + 0.001 * r45d45std) + r45d45std
    d46 = 1000. * (r46sam / r46std - 1.) * (1. + 0.001 * d46std) + d46std

    # solve the system of equations; x = [d13C, d18O], return d13C only
    x = np.linalg.solve(eqn_mat, np.array([r45d45, d46]))
    return x[0]
def delta13c_santrock(r45sam, r46sam, d13cstd, r45std, r46std,
                      ks='Santrock', d18ostd=23.5):
    """
    Given the measured isotope signals of a sample and a
    standard and the delta-13C of that standard, calculate
    the delta-13C of the sample.

    Algorithm from Santrock, Studley & Hayes 1985 Anal. Chem.

    :param ks: name of the constant set from delta13c_constants to use
    :param d18ostd: assumed delta-18O of the standard
    """
    k = delta13c_constants()[ks]

    # function for calculating 17R from 18R (mass-dependent relation)
    def c17(r):
        return k['K'] * r ** k['A']

    rcpdb, rosmow = k['S13'], k['S18']

    # known delta values for the ref peak
    r13std = (d13cstd / 1000. + 1) * rcpdb
    r18std = (d18ostd / 1000. + 1) * rosmow

    # determine the correction factors
    c45 = r13std + 2 * c17(r18std)
    c46 = c17(r18std) ** 2 + 2 * r13std * c17(r18std) + 2 * r18std

    # correct the voltage ratios to ion ratios
    r45 = (r45sam / r45std) * c45
    r46 = (r46sam / r46std) * c46

    # root of this function gives the sample's 18R
    def rf(r18):
        return -3 * c17(r18) ** 2 + 2 * r45 * c17(r18) + 2 * r18 - r46

    # r18 = scipy.optimize.root(rf, r18std).x[0]  # use with scipy 0.11.0
    r18 = fsolve(rf, r18std)[0]
    r13 = r45 - 2 * c17(r18)
    return 1000 * (r13 / rcpdb - 1)
def walk(zk, path='/'):
    """Yields all paths under `path`, depth-first, starting with `path`."""
    children = zk.get_children(path)
    yield path
    for name in children:
        if path == '/':
            child_path = '/%s' % name
        else:
            child_path = '%s/%s' % (path, name)
        for descendant in walk(zk, child_path):
            yield descendant
def pretty_print_head(dict_, count=10):  # TODO only format and rename to pretty_head
    '''
    Pretty print some items of a dict.

    For an unordered dict, ``count`` arbitrary items will be printed.

    Parameters
    ----------
    dict_ : ~typing.Dict
        Dict to print from.
    count : int
        Number of items to print.

    Raises
    ------
    ValueError
        When ``count < 1``.
    '''
    if count < 1:
        raise ValueError('`count` must be at least 1')
    head = dict(take(count, dict_.items()))
    pprint(head)
def invert(dict_):  # TODO return a MultiDict right away
    '''
    Invert dict by swapping each value with its key.

    Parameters
    ----------
    dict_ : ~typing.Dict[~typing.Hashable, ~typing.Hashable]
        Dict to invert.

    Returns
    -------
    ~typing.Dict[~typing.Hashable, ~typing.Set[~typing.Hashable]]
        Dict mapping each original value to the set of keys that mapped
        to it (several keys may share one value).
    '''
    inverted = defaultdict(set)
    for key, value in dict_.items():
        inverted[value].add(key)
    return dict(inverted)
def delete(self, json=None):
    """Send a DELETE request and return the JSON decoded result.

    Delegates to ``self._call``, which handles SSL verification and
    error translation.

    Args:
        json (dict, optional): Object to encode and send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    return self._call('delete', url=self.endpoint, json=json)
def put(self, json=None):
    """Send a PUT request and return the JSON decoded result.

    Delegates to ``self._call``, which handles SSL verification and
    error translation.

    Args:
        json (dict, optional): Object to encode and send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    return self._call('put', url=self.endpoint, json=json)
def _call(self, method, *args, **kwargs):
    """Call the remote service and return the decoded response data.

    :param method: HTTP verb name understood by ``requests`` (e.g. ``get``).
    :raises HelpScoutRemoteException: on any non-2xx response status.
    :return: the multi-record payload, a single-record payload wrapped in
        a list, or None when neither data key is present.
    """
    assert self.session
    if not kwargs.get('verify'):
        kwargs['verify'] = self.SSL_VERIFY
    response = self.session.request(method, *args, **kwargs)
    # NOTE(review): ``text and json() or {}`` also collapses any *falsy*
    # JSON payload (e.g. empty list) to {} -- confirm that is intended.
    response_json = response.text and response.json() or {}
    if response.status_code < 200 or response.status_code >= 300:
        message = response_json.get('error', response_json.get('message'))
        raise HelpScoutRemoteException(response.status_code, message)
    # Track the pagination state advertised by the server (default page 1).
    self.page_current = response_json.get(self.PAGE_CURRENT, 1)
    self.page_total = response_json.get(self.PAGE_TOTAL, 1)
    try:
        return response_json[self.PAGE_DATA_MULTI]
    except KeyError:
        pass
    try:
        return [response_json[self.PAGE_DATA_SINGLE]]
    except KeyError:
        pass
    return None
def eval(self, x):
    """This method returns the evaluation of the function with input x

    :param x: this is the input as a Long
    """
    aes = AES.new(self.key, AES.MODE_CFB, "\0" * AES.block_size)
    while True:
        # NOTE(review): ``nonce`` is reset to 0 on every pass, so the
        # ``nonce += 1`` below never takes effect; the loop still
        # terminates only because the CFB cipher object is stateful and
        # emits a fresh keystream block each pass.  Moving the
        # initialisation out of the loop would change existing PRF
        # outputs, so the issue is flagged rather than fixed -- confirm
        # against the protocol before changing.
        nonce = 0
        data = KeyedPRF.pad(SHA256.new(str(x + nonce).encode()).digest(),
                            (number.size(self.range) + 7) // 8)
        # mask trims the ciphertext to the bit length of ``range``;
        # values outside [0, range) are rejected and re-drawn.
        num = self.mask & number.bytes_to_long(aes.encrypt(data))
        if (num < self.range):
            return num
        nonce += 1
def merge_by_overlap(sets):
    '''
    Of a list of sets, merge those that overlap, in place.

    The result isn't necessarily a subsequence of the original ``sets``.

    Parameters
    ----------
    sets : ~typing.Sequence[~typing.Set[~typing.Any]]
        Sets of which to merge those that overlap. Empty sets are ignored.

    Notes
    -----
    Implementation is based on `this StackOverflow answer`_. It outperforms all
    other algorithms in the thread (visited at dec 2015) on python3.4 using a
    wide range of inputs.

    .. _this StackOverflow answer: http://stackoverflow.com/a/9453249/1031434

    Examples
    --------
    >>> merge_by_overlap([{1,2}, set(), {2,3}, {4,5,6}, {6,7}])
    [{1,2,3}, {4,5,6,7}]
    '''
    data = sets
    bins = list(range(len(data)))  # Initialize each bin[n] == n
    nums = dict()  # maps each element to the bin index it was first seen in
    for r, row in enumerate(data):
        if not row:
            data[r] = None
        else:
            for num in row:
                if num not in nums:
                    # New number: tag it with a pointer to this row's bin
                    nums[num] = r
                    continue
                else:
                    # _locate_bin (module helper) follows bin pointers to
                    # the root bin holding this element.
                    dest = _locate_bin(bins, nums[num])
                    if dest == r:
                        continue  # already in the same bin

                    if dest > r:
                        dest, r = r, dest  # always merge into the smallest bin

                    data[dest].update(data[r])
                    data[r] = None
                    # Update our indices to reflect the move
                    bins[r] = dest
                    r = dest

    # Remove empty bins
    for i in reversed(range(len(data))):
        if not data[i]:
            del data[i]
def auth_proxy(self, method):
    """Authentication proxy for API requests.

    This is required because the API objects are naive of ``HelpScout``,
    so they would otherwise be unauthenticated.

    Args:
        method (callable): A method call that should be authenticated. It
            should accept a ``requests.Session`` as its first parameter,
            which should be used for the actual API call.

    Returns:
        mixed: The results of the authenticated callable.
    """
    # self.session is looked up at *call* time so the proxy always uses
    # the current session, not the one present when the proxy was made.
    def _authenticated(*args, **kwargs):
        """Invoke ``method`` with the live session injected first."""
        return method(self.session, *args, **kwargs)
    return _authenticated
def find_in_mailbox(cls, session, mailbox_or_id):
    """Get the users that are associated to a Mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox_or_id (MailboxRef or int): Mailbox, or ID of the
            mailbox, to get the users for.

    Returns:
        RequestPaginator(output_type=helpscout.models.User): Users
            iterator.
    """
    # Accept either a mailbox object (use its ``id``) or a bare ID.
    mailbox_id = getattr(mailbox_or_id, 'id', mailbox_or_id)
    return cls(
        '/mailboxes/%d/users.json' % mailbox_id,
        session=session,
    )
q40634 | _hash_the_file | train | def _hash_the_file(hasher, filename):
"""Helper function for creating hash functions.
See implementation of :func:`dtoolcore.filehasher.shasum`
for more usage details.
"""
BUF_SIZE = 65536
with open(filename, 'rb') as f:
buf = f.read(BUF_SIZE)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(BUF_SIZE)
return hasher | python | {
"resource": ""
} |
def enable_precompute(panel):
    """Schedule a precompute task for `panel`.

    :raises RuntimeError: when the scheduler reports a non-success status.
    :return: the scheduler task id.
    """
    use_metis = panel['data_source']['source_type'] == 'querybuilder'
    if use_metis:
        query = panel['data_source']['query']
    else:
        # NOTE(review): the user code is interpolated verbatim into the
        # scheduled task source -- this assumes trusted panel authors.
        query = "u'''%s'''" % panel['data_source']['code']
    precompute = panel['data_source']['precompute']
    timeframe = panel['data_source']['timeframe']
    bucket_width = precompute['bucket_width']['value']
    time_scale = precompute['bucket_width']['scale']['name']
    bucket_width_seconds = get_seconds(bucket_width, time_scale)
    if timeframe['mode']['value'] == 'recent':
        untrusted_time = precompute['untrusted_time']['value']
        untrusted_time_scale = precompute['untrusted_time']['scale']['name']
        untrusted_time_seconds = get_seconds(untrusted_time, untrusted_time_scale)
        # Schedule the task with an interval equal to the bucket_width
        interval = bucket_width_seconds
    elif timeframe['mode']['value'] == 'range':
        untrusted_time_seconds = 0
        # Schedule the task with an interval of 0 so it only runs once
        interval = 0
    task_code = PRECOMPUTE_INITIALIZATION_CODE % (query, timeframe,
                                                  bucket_width_seconds,
                                                  untrusted_time_seconds,
                                                  use_metis)
    result = scheduler_client.schedule(task_code, interval)
    if result['status'] != 'success':
        raise RuntimeError(result.get('reason'))
    return result['id']
def disable_precompute(panel):
    """Cancel precomputation for `panel`.

    :raises RuntimeError: when the scheduler reports a non-success status.
    """
    task_id = panel['data_source']['precompute']['task_id']
    result = scheduler_client.cancel(task_id)
    if result['status'] != 'success':
        raise RuntimeError(result.get('reason'))
def _get_timeframe_bounds(self, timeframe, bucket_width):
    """
    Get a `bucket_width` aligned `start_time` and `end_time` from a
    `timeframe` dict.

    :param timeframe: dict with a ``mode`` of 'recent' (duration back
        from now) or 'range' (explicit from/to datetimes).
    :param bucket_width: bucket width in epoch seconds, or falsy to skip
        alignment.
    :raises ValueError: for any other timeframe mode.
    :return: (start, end) as datetimes.
    """
    if bucket_width:
        bucket_width_seconds = bucket_width
        bucket_width = epoch_time_to_kronos_time(bucket_width)

    # TODO(derek): Potential optimization by setting the end_time equal to the
    # untrusted_time if end_time > untrusted_time and the results are not being
    # output to the user (only for caching)
    if timeframe['mode']['value'] == 'recent':
        # Set end_time equal to now and align to bucket width
        end_time = kronos_time_now()
        original_end_time = end_time
        duration = get_seconds(timeframe['value'], timeframe['scale']['name'])
        duration = epoch_time_to_kronos_time(duration)
        start_time = original_end_time - duration

        if bucket_width:
            # Align values to the bucket width
            # TODO(derek): Warn the user that the timeframe has been altered to fit
            # the bucket width
            if (end_time % bucket_width) != 0:
                end_time += bucket_width - (end_time % bucket_width)
            if (start_time % bucket_width) != 0:
                start_time -= (start_time % bucket_width)

        start = kronos_time_to_datetime(start_time)
        end = kronos_time_to_datetime(end_time)
    elif timeframe['mode']['value'] == 'range':
        end = datetime.datetime.strptime(timeframe['to'], DT_FORMAT)
        end_seconds = datetime_to_epoch_time(end)

        start = datetime.datetime.strptime(timeframe['from'], DT_FORMAT)
        start_seconds = datetime_to_epoch_time(start)

        if bucket_width:
            # Align values to the bucket width
            # TODO(derek): Warn the user that the timeframe has been altered to fit
            # the bucket width
            start_bump = start_seconds % bucket_width_seconds
            start -= datetime.timedelta(seconds=start_bump)
            if (end_seconds % bucket_width_seconds) != 0:
                end_bump = bucket_width_seconds - (end_seconds % bucket_width_seconds)
                end += datetime.timedelta(seconds=end_bump)
    else:
        raise ValueError("Timeframe mode must be 'recent' or 'range'")

    return start, end
def compute(self, use_cache=True):
    """Call a user defined query and return events with optional help from
    the cache.

    :param use_cache: Specifies whether the cache should be used when possible
    :raises ValueError: if caching is requested but no bucket_width was
        configured at initialisation.
    """
    if use_cache:
        if not self._bucket_width:
            raise ValueError('QueryCompute must be initialized with a bucket_width'
                             ' to use caching features.')
        return list(self._query_cache.retrieve_interval(self._start_time,
                                                        self._end_time,
                                                        compute_missing=True))
    else:
        # No cache: run the query directly via Metis or the raw client.
        if self._metis:
            return self._run_metis(self._start_time, self._end_time)
        else:
            return self._run_query(self._start_time, self._end_time)
def cache(self):
    """Run the user-defined query and store its results in the cache.

    Requires the instance to have been configured with a bucket width
    and an untrusted time.
    """
    if not self._bucket_width or self._untrusted_time is None:
        raise ValueError('QueryCompute must be initialized with a bucket_width '
                         'and an untrusted_time in order to write to the cache.')
    trust_horizon = datetime.datetime.now() - datetime.timedelta(
        seconds=self._untrusted_time)
    # compute_and_cache_missing_buckets is a generator; drain it so the
    # cache writes actually happen.
    list(self._query_cache.compute_and_cache_missing_buckets(
        self._start_time,
        self._end_time,
        trust_horizon))
def list(cls, session, first_name=None, last_name=None, email=None,
         modified_since=None):
    """List the customers.

    Customers can be filtered on any combination of first name, last name,
    email, and modifiedSince.

    Args:
        session (requests.sessions.Session): Authenticated session.
        first_name (str, optional): First name of customer.
        last_name (str, optional): Last name of customer.
        email (str, optional): Email address of customer.
        modified_since (datetime.datetime, optional): If modified after
            this date.

    Returns:
        RequestPaginator(output_type=helpscout.models.Customer): Customers
            iterator.
    """
    # Map keyword filters to the API's camelCase names; empty values are
    # stripped before sending.
    filters = {
        'firstName': first_name,
        'lastName': last_name,
        'email': email,
        'modifiedSince': modified_since,
    }
    data = cls.__object__.get_non_empty_vals(filters)
    return super(Customers, cls).list(session, data=data)
def search(cls, session, queries):
    """Search for a customer given a domain.

    Args:
        session (requests.sessions.Session): Authenticated session.
        queries (helpscout.models.Domain or iter): The queries for the
            domain. If a ``Domain`` object is provided, it will simply be
            returned. Otherwise, a ``Domain`` object will be generated
            from the complex queries. In this case, the queries should
            conform to the interface in
            :func:`helpscout.domain.Domain.from_tuple`.

    Returns:
        RequestPaginator(output_type=helpscout.models.SearchCustomer):
            SearchCustomer iterator.
    """
    # Delegate to the base search, fixing the output type to
    # SearchCustomer.
    return super(Customers, cls).search(session, queries, SearchCustomer)
def create_token(key, payload):
    """Generate a hex HMAC auth token for ``payload``.

    ``payload`` should be a JSON-encodable data structure.

    Fixes for Python 3: ``hmac.new`` requires an explicit digestmod
    (mandatory since 3.8) and operates on bytes, so str keys are encoded
    as UTF-8.  MD5 is kept as the digest to stay compatible with tokens
    produced under the historic Python 2 default.

    NOTE(review): HMAC-MD5 is weak by modern standards, and
    ``json.dumps`` output depends on dict insertion order -- consider a
    stronger digest plus ``sort_keys=True`` if token strength/stability
    matter (both changes would invalidate existing tokens).

    :param key: secret key (``bytes`` or ``str``).
    :param payload: JSON-encodable structure to authenticate.
    :returns: hex digest string of the HMAC.
    """
    if isinstance(key, str):
        key = key.encode('utf-8')
    message = json.dumps(payload).encode('utf-8')
    return hmac.new(key, message, hashlib.md5).hexdigest()
def get_app(settings_file=None):
    """Return the scheduler Flask app singleton.

    Configuration happens only on the first call.  Because the scheduler
    is a threaded environment, it matters that this runs to completion
    (driven by the management script) before the scheduler thread is
    spawned; as long as that holds, no locks are needed here.
    """
    global _APP
    if _APP:
        return _APP
    # Assign the singleton immediately so a reentrant call (e.g. during the
    # scheduler.views import below) sees the partially configured app
    # instead of recursing.
    _APP = Flask(__name__)
    db.init_app(_APP)
    # Instantiated for its side effect of wiring Flask-Migrate into the app.
    migrate = Migrate(_APP, db, directory='scheduler/migrations')
    # Defaults first; the optional user-supplied settings file overrides them.
    _APP.config.from_pyfile('../jia/conf/default_settings.py')
    if settings_file:
        if not settings_file.startswith('/'):
            settings_file = os.path.join(os.pardir, settings_file)
        _APP.config.from_pyfile(settings_file, silent=True)
    _APP.config.update(PORT=_APP.config['SCHEDULER_PORT'])
    _APP.config.update(
        SQLALCHEMY_DATABASE_URI=_APP.config['SCHEDULER_DATABASE_URI'])
    _APP.secret_key = _APP.config['SECRET_KEY']
    from scheduler.views import scheduler
    _APP.register_blueprint(scheduler)
    return _APP
def create(cls, session, record, imported=False, auto_reply=False):
    """Create a conversation.

    A conversation cannot be created with more than 100 threads; the API
    responds with HTTP 412 if this is attempted.

    Args:
        session (requests.sessions.Session): Authenticated session.
        record (helpscout.models.Conversation): The conversation to be
            created.
        imported (bool, optional): When ``True`` the conversation is
            created for historical purposes (e.g. migrating from another
            platform) and no outgoing emails or notifications are
            generated.
        auto_reply (bool): When ``True`` an auto reply will be sent as
            long as there is at least one ``customer`` thread in the
            conversation.

    Returns:
        helpscout.models.Conversation: Newly created conversation.
    """
    options = {'imported': imported, 'auto_reply': auto_reply}
    return super(Conversations, cls).create(session, record, **options)
def create_attachment(cls, session, attachment):
    """Create an attachment so it can be referenced from a thread.

    An attachment must be sent to the API before it can be used in a
    thread; use the hash from the returned record when creating the
    thread.  Note that HelpScout only supports attachments of 10MB or
    lower.

    Args:
        session (requests.sessions.Session): Authenticated session.
        attachment (helpscout.models.Attachment): The attachment to be
            created.

    Returns:
        helpscout.models.Attachment: The newly created attachment (hash
            property only).
    """
    return super(Conversations, cls).create(
        session,
        attachment,
        out_type=Attachment,
        endpoint_override='/attachments.json',
    )
def create_thread(cls, session, conversation, thread, imported=False):
    """Add a new thread to an existing conversation.

    Threads cannot be added to conversations with 100 threads (or
    more); the API responds with HTTP 412 if attempted.

    Args:
        conversation (helpscout.models.Conversation): The conversation
            that the thread is being added to.
        session (requests.sessions.Session): Authenticated session.
        thread (helpscout.models.Thread): The thread to be created.
        imported (bool, optional): When ``True`` the thread is created
            for historical purposes and no outgoing emails or
            notifications are generated.

    Returns:
        helpscout.models.Conversation: Conversation including newly
            created thread.
    """
    endpoint = '/conversations/%s.json' % conversation.id
    return super(Conversations, cls).create(
        session, thread, endpoint_override=endpoint, imported=imported)
def delete_attachment(cls, session, attachment):
    """Delete an attachment.

    Args:
        session (requests.sessions.Session): Authenticated session.
        attachment (helpscout.models.Attachment): The attachment to be
            deleted.

    Returns:
        NoneType: Nothing.
    """
    endpoint = '/attachments/%s.json' % attachment.id
    return super(Conversations, cls).delete(
        session, attachment, endpoint_override=endpoint, out_type=Attachment)
def find_customer(cls, session, mailbox, customer):
    """Return conversations for a specific customer in a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox (helpscout.models.Mailbox): Mailbox to search.
        customer (helpscout.models.Customer): Customer to search for.

    Returns:
        RequestPaginator(output_type=helpscout.models.Conversation):
            Conversations iterator.
    """
    endpoint = '/mailboxes/%d/customers/%s/conversations.json' % (
        mailbox.id, customer.id,
    )
    return cls(endpoint, session=session)
def find_user(cls, session, mailbox, user):
    """Return conversations for a specific user in a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox (helpscout.models.Mailbox): Mailbox to search.
        user (helpscout.models.User): User to search for.

    Returns:
        RequestPaginator(output_type=helpscout.models.Conversation):
            Conversations iterator.
    """
    endpoint = '/mailboxes/%d/users/%s/conversations.json' % (
        mailbox.id, user.id,
    )
    return cls(endpoint, session=session)
def get_attachment_data(cls, session, attachment_id):
    """Return a specific attachment's data.

    Args:
        session (requests.sessions.Session): Authenticated session.
        attachment_id (int): The ID of the attachment from which to get
            data.

    Returns:
        helpscout.models.AttachmentData: An attachment data singleton,
            if existing. Otherwise ``None``.
    """
    return cls(
        '/attachments/%d/data.json' % attachment_id,
        session=session,
        singleton=True,
        out_type=AttachmentData,
    )
def list(cls, session, mailbox):
    """Return conversations in a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox (helpscout.models.Mailbox): Mailbox to list.

    Returns:
        RequestPaginator(output_type=helpscout.models.Conversation):
            Conversations iterator.
    """
    return super(Conversations, cls).list(
        session, '/mailboxes/%d/conversations.json' % mailbox.id)
def list_folder(cls, session, mailbox, folder):
    """Return conversations in a specific folder of a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
        folder (helpscout.models.Folder): Folder to list.

    Returns:
        RequestPaginator(output_type=helpscout.models.Conversation):
            Conversations iterator.
    """
    endpoint = '/mailboxes/%d/folders/%s/conversations.json' % (
        mailbox.id, folder.id,
    )
    return cls(endpoint, session=session)
def search(cls, session, queries):
    """Search for conversations matching a domain query.

    Args:
        session (requests.sessions.Session): Authenticated session.
        queries (helpscout.models.Domain or iter): Either a ready-made
            ``Domain`` object (returned as-is) or an iterable of complex
            queries as accepted by
            :func:`helpscout.domain.Domain.from_tuple`.

    Returns:
        RequestPaginator(output_type=helpscout.models.SearchCustomer):
            SearchCustomer iterator.
    """
    parent = super(Conversations, cls)
    return parent.search(session, queries, SearchConversation)
def update_thread(cls, session, conversation, thread):
    """Update a thread.

    Args:
        session (requests.sessions.Session): Authenticated session.
        conversation (helpscout.models.Conversation): The conversation
            that the thread belongs to.
        thread (helpscout.models.Thread): The thread to be updated.

    Returns:
        helpscout.models.Conversation: Conversation including freshly
            updated thread.
    """
    payload = thread.to_api()
    # Ask the API to return the full, refreshed conversation.
    payload['reload'] = True
    endpoint = '/conversations/%s/threads/%d.json' % (
        conversation.id, thread.id,
    )
    return cls(
        endpoint,
        data=payload,
        request_type=RequestPaginator.PUT,
        singleton=True,
        session=session,
    )
def set_level(logger, level):
    '''
    Temporarily change log level of logger.

    Parameters
    ----------
    logger : str or ~logging.Logger
        Logger name or logger whose log level to change.
    level : int
        Log level to set.

    Examples
    --------
    >>> with set_level('sqlalchemy.engine', logging.INFO):
    ...     pass  # sqlalchemy log level is set to INFO in this block
    '''
    log = logging.getLogger(logger) if isinstance(logger, str) else logger
    saved = log.level
    log.setLevel(level)
    try:
        yield
    finally:
        # Restore the previous level even if the block raised.
        log.setLevel(saved)
def configure(log_file):
    '''
    Configure root logger to log INFO to stderr and DEBUG to log file.

    The log file is appended to. Stderr uses a terse format, while the
    log file uses a verbose unambiguous format. Root level is set to
    INFO.

    Parameters
    ----------
    log_file : ~pathlib.Path
        File to log to.

    Returns
    -------
    ~typing.Tuple[~logging.StreamHandler, ~logging.FileHandler]
        Stderr and file handler respectively.
    '''
    # Note: do not use logging.basicConfig as it does not play along with
    # caplog in testing
    root = logging.getLogger()
    root.setLevel(logging.INFO)

    # Terse INFO output on stderr.
    stderr_handler = logging.StreamHandler()
    stderr_handler.setLevel(logging.INFO)
    stderr_handler.setFormatter(
        logging.Formatter('{levelname[0]}: {message}', style='{'))
    root.addHandler(stderr_handler)

    # Verbose DEBUG output appended to the log file.
    file_handler = logging.FileHandler(str(log_file))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '{levelname[0]} {asctime} {name} ({module}:{lineno}):\n{message}\n',
        style='{'))
    root.addHandler(file_handler)

    return stderr_handler, file_handler
def get_pandasframe(self):
    """Load data from the reader's dataset(s) into a pandas frame."""
    if not self.dataset:
        # No single dataset selected: aggregate across datasets.
        return self._get_pandasframe_across_datasets()
    self._load_dimensions()
    return self._get_pandasframe_one_dataset()
def add_value(self, value, index_point):
    """Append ``value`` at ``index_point`` unless that index exists.

    A value for an already-present index point is silently ignored.
    """
    if index_point in self.index:
        return
    self.values.append(value)
    self.index.append(index_point)
def get_pandas_series(self):
    """Build a named pandas Series from the stored index and values."""
    return pandas.Series(data=self.values, index=self.index, name=self.name)
def TemporaryDirectory(suffix=None, prefix=None, dir=None, on_error='ignore'):  # @ReservedAssignment
    '''
    An extension to `tempfile.TemporaryDirectory`.

    Unlike with `python:tempfile`, a :py:class:`~pathlib.Path` is yielded on
    ``__enter__``, not a `str`.

    Parameters
    ----------
    suffix : str
        See `tempfile.TemporaryDirectory`.
    prefix : str
        See `tempfile.TemporaryDirectory`.
    dir : ~pathlib.Path
        See `tempfile.TemporaryDirectory`, but pass a :py:class:`~pathlib.Path` instead.
    on_error : str
        Handling of failure to delete directory (happens frequently on NFS), one of:

        raise
            Raise exception on failure.
        ignore
            Fail silently.
    '''
    directory = tempfile.TemporaryDirectory(
        suffix, prefix, str(dir) if dir else dir)
    try:
        yield Path(directory.name)
    finally:
        try:
            directory.cleanup()
        except OSError as ex:
            print(ex)
            # Only a failed delete (ENOTEMPTY) is suppressed, and only when
            # the caller asked to ignore errors.
            if on_error != 'ignore' or ex.errno != errno.ENOTEMPTY:
                raise
def hash(path, hash_function=hashlib.sha512):  # @ReservedAssignment
    '''
    Hash file or directory.

    Parameters
    ----------
    path : ~pathlib.Path
        File or directory to hash.
    hash_function : ~typing.Callable[[], hash object]
        Function which creates a hashlib hash object when called. Defaults to
        ``hashlib.sha512``.

    Returns
    -------
    hash object
        hashlib hash object of file/directory contents. File/directory stat data
        is ignored. The directory digest covers file/directory contents and
        their location relative to the directory being digested. The directory
        name itself is ignored.
    '''
    hash_ = hash_function()
    if path.is_dir():
        for directory, directories, files in os.walk(str(path), topdown=True):
            # Bug fix: sort dirnames in place so os.walk visits directories in
            # a deterministic order; the digest folds in each directory's
            # relative path, so platform-dependent listing order previously
            # made the digest non-reproducible.
            directories.sort()
            # Note:
            # - directory: path to current directory in walk relative to current working direcotry
            # - directories/files: dir/file names
            # Note: file names can contain nearly any character (even newlines).
            # hash like (ignore the whitespace):
            #
            #   h(relative-dir-path)
            #     h(dir_name)
            #     h(dir_name2)
            #     ,
            #     h(file_name) h(file_content)
            #     h(file_name2) h(file_content2)
            #     ;
            #   h(relative-dir-path2)
            #   ...
            relative_dir = str(Path(directory).relative_to(path))
            hash_.update(hash_function(relative_dir.encode()).digest())
            for name in directories:
                hash_.update(hash_function(name.encode()).digest())
            hash_.update(b',')
            for name in sorted(files):
                hash_.update(hash_function(name.encode()).digest())
                # Bug fix: propagate hash_function into the recursion; nested
                # files used to be hashed with the default function even when
                # the caller supplied a different one.
                hash_.update(hash(Path(directory) / name, hash_function).digest())
            hash_.update(b';')
    else:
        with path.open('rb') as f:
            while True:
                buffer = f.read(65536)
                if not buffer:
                    break
                hash_.update(buffer)
    return hash_
def diff_identifiers(a, b):
    """Return list of tuples where identifiers in datasets differ.

    Tuple structure:

    (identifier, present in a, present in b)

    :param a: first :class:`dtoolcore.DataSet`
    :param b: second :class:`dtoolcore.DataSet`
    :returns: list of tuples where identifiers in datasets differ
    """
    ids_a = set(a.identifiers)
    ids_b = set(b.identifiers)
    only_in_a = [(i, True, False) for i in ids_a - ids_b]
    only_in_b = [(i, False, True) for i in ids_b - ids_a]
    return only_in_a + only_in_b
def diff_sizes(a, b, progressbar=None):
    """Return list of tuples where sizes differ.

    Tuple structure:

    (identifier, size in a, size in b)

    Assumes list of identifiers in a and b are identical.

    :param a: first :class:`dtoolcore.DataSet`
    :param b: second :class:`dtoolcore.DataSet`
    :returns: list of tuples for all items with different sizes
    """
    mismatches = []
    for identifier in a.identifiers:
        size_a = a.item_properties(identifier)["size_in_bytes"]
        size_b = b.item_properties(identifier)["size_in_bytes"]
        if size_a != size_b:
            mismatches.append((identifier, size_a, size_b))
        # Tick once per item inspected, not per mismatch.
        if progressbar:
            progressbar.update(1)
    return mismatches
def diff_content(a, reference, progressbar=None):
    """Return list of tuples where content differ.

    Tuple structure:

    (identifier, hash in a, hash in reference)

    Assumes list of identifiers in a and b are identical.
    Storage broker of reference used to generate hash for files in a.

    :param a: first :class:`dtoolcore.DataSet`
    :param reference: reference :class:`dtoolcore.DataSet`
    :returns: list of tuples for all items with different content
    """
    mismatches = []
    for identifier in a.identifiers:
        abspath = a.item_content_abspath(identifier)
        # Hash a's content with the reference's own hasher so the two
        # digests are comparable.
        computed = reference._storage_broker.hasher(abspath)
        expected = reference.item_properties(identifier)["hash"]
        if computed != expected:
            mismatches.append((identifier, computed, expected))
        if progressbar:
            progressbar.update(1)
    return mismatches
def cli():
    """Parse options from the command line"""
    parser = argparse.ArgumentParser(
        prog="sphinx-serve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        # '-h' is reused for --host below, so conflicts must be resolvable.
        conflict_handler="resolve",
        description=__doc__,
    )
    parser.add_argument(
        "-v", "--version", action="version",
        version="%(prog)s {0}".format(__version__),
    )
    parser.add_argument(
        "-h", "--host", action="store", default="0.0.0.0",
        help="Listen to the given hostname",
    )
    parser.add_argument(
        "-p", "--port", action="store", type=int, default=8081,
        help="Listen to given port",
    )
    parser.add_argument(
        "-b", "--build", action="store", default="_build",
        help="Build folder name",
    )
    parser.add_argument(
        "-s", "--single", action="store_true",
        help="Serve the single-html documentation version",
    )
    return parser.parse_args()
def find_build_dir(path, build="_build"):
    """Try to guess the build folder's location.

    Walks upward from ``path`` until a directory named ``build`` is
    found; returns ``None`` once the filesystem root is reached.
    """
    path = os.path.abspath(os.path.expanduser(path))
    subdirs = [
        entry for entry in os.listdir(path)
        if os.path.isdir(os.path.join(path, entry))
    ]
    if build in subdirs:
        return os.path.join(path, build)
    if path == os.path.realpath("/"):
        return None
    # Not found here: recurse into the parent directory.
    return find_build_dir("{0}/..".format(path), build)
def flip_uuid_parts(uuid):
    """
    Flips high and low segments of the timestamp portion of a UUID string.

    This enables correct lexicographic sorting. Because it is a simple flip,
    this function works in both directions.
    """
    parts = uuid.split('-')
    # Swap the first and third dash-separated segments; the rest stay put.
    reordered = [parts[2], parts[1], parts[0]] + parts[3:]
    return '-'.join(reordered)
def genExampleStar(binaryLetter='', heirarchy=True):
    """ generates example star, if binaryLetter is true creates a parent binary object, if heirarchy is true will create a
    system and link everything up
    """
    starPar = StarParameters()
    # Fixed fake parameters; the two 'name' entries are deliberate aliases.
    for key, value in (
            ('age', '7.6'),
            ('magB', '9.8'),
            ('magH', '7.4'),
            ('magI', '7.6'),
            ('magJ', '7.5'),
            ('magK', '7.3'),
            ('magV', '9.0'),
            ('mass', '0.98'),
            ('metallicity', '0.43'),
            ('name', 'Example Star {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)),
            ('name', 'HD {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)),
            ('radius', '0.95'),
            ('spectraltype', 'G5'),
            ('temperature', '5370'),
    ):
        starPar.addParam(key, value)
    exampleStar = Star(starPar.params)
    exampleStar.flags.addFlag('Fake')
    if heirarchy:
        # Wire the star into a parent binary (if a letter was given) or a
        # plain example system.
        parent = genExampleBinary() if binaryLetter else genExampleSystem()
        parent._addChild(exampleStar)
        exampleStar.parent = parent
    return exampleStar
q40669 | config_list | train | def config_list(backend):
"""
Print the current configuration
"""
click.secho('Print Configuration', fg='green')
print str(backend.dki.get_config()) | python | {
"resource": ""
} |
def recipe_status(backend):
    """
    Compare local recipe to remote recipe for the current recipe.
    """
    # Both a kitchen and a recipe folder context are required.
    kitchen = DKCloudCommandRunner.which_kitchen_name()
    if kitchen is None:
        raise click.ClickException('You are not in a Kitchen')
    recipe_dir = DKRecipeDisk.find_recipe_root_dir()
    if recipe_dir is None:
        raise click.ClickException('You must be in a Recipe folder')
    recipe_name = DKRecipeDisk.find_recipe_name()
    banner = "%s - Getting the status of Recipe '%s' in Kitchen '%s'\n\tversus directory '%s'" % (
        get_datetime(), recipe_name, kitchen, recipe_dir)
    click.secho(banner, fg='green')
    check_and_print(
        DKCloudCommandRunner.recipe_status(backend.dki, kitchen, recipe_name, recipe_dir))
def recipe_conflicts(backend):
    """
    See if there are any unresolved conflicts for this recipe.
    """
    recipe_dir = DKRecipeDisk.find_recipe_root_dir()
    if recipe_dir is None:
        raise click.ClickException('You must be in a Recipe folder.')
    # Fix: the recipe name was looked up twice; a single lookup suffices.
    recipe_name = DKRecipeDisk.find_recipe_name()
    click.secho("%s - Checking for conflicts on Recipe '%s'" % (
        get_datetime(), recipe_name))
    check_and_print(DKCloudCommandRunner.get_unresolved_conflicts(recipe_name, recipe_dir))
def kitchen_list(backend):
    """
    List all Kitchens
    """
    # secho is equivalent to echo(style(...)).
    click.secho('%s - Getting the list of kitchens' % get_datetime(), fg='green')
    check_and_print(DKCloudCommandRunner.list_kitchen(backend.dki))
def kitchen_get(backend, kitchen_name, recipe):
    """
    Get an existing Kitchen
    """
    # Refuse to nest a kitchen inside an existing kitchen checkout.
    found_kitchen = DKKitchenDisk.find_kitchen_name()
    if found_kitchen is not None and len(found_kitchen) > 0:
        raise click.ClickException("You cannot get a kitchen into an existing kitchen directory structure.")
    if len(recipe) > 0:
        banner = "%s - Getting kitchen '%s' and the recipes %s" % (get_datetime(), kitchen_name, str(recipe))
    else:
        banner = "%s - Getting kitchen '%s'" % (get_datetime(), kitchen_name)
    click.secho(banner, fg='green')
    check_and_print(DKCloudCommandRunner.get_kitchen(backend.dki, kitchen_name, os.getcwd(), recipe))
def kitchen_create(backend, parent, kitchen):
    """
    Create a new kitchen
    """
    click.secho('%s - Creating kitchen %s from parent kitchen %s' % (get_datetime(), kitchen, parent), fg='green')
    master = 'master'
    # 'master' is reserved; it may only ever be a parent.
    if kitchen.lower() == master.lower():
        raise click.ClickException('Cannot create a kitchen called %s' % master)
    check_and_print(DKCloudCommandRunner.create_kitchen(backend.dki, parent, kitchen))
def kitchen_delete(backend, kitchen):
    """
    Provide the name of the kitchen to delete
    """
    click.secho('%s - Deleting kitchen %s' % (get_datetime(), kitchen), fg='green')
    master = 'master'
    # The reserved 'master' kitchen can never be deleted.
    if kitchen.lower() == master.lower():
        raise click.ClickException('Cannot delete the kitchen called %s' % master)
    check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
def kitchen_config(backend, kitchen, add, get, unset, listall):
    """
    Get and Set Kitchen variable overrides
    """
    # Resolve the kitchen from the argument or the current directory.
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    check_and_print(
        DKCloudCommandRunner.config_kitchen(backend.dki, use_kitchen, add, get, unset, listall))
def kitchen_merge(backend, source_kitchen, target_kitchen):
    """
    Merge two Kitchens
    """
    banner = '%s - Merging Kitchen %s into Kitchen %s' % (
        get_datetime(), source_kitchen, target_kitchen)
    click.secho(banner, fg='green')
    check_and_print(
        DKCloudCommandRunner.merge_kitchens_improved(backend.dki, source_kitchen, target_kitchen))
def recipe_list(backend, kitchen):
    """
    List the Recipes in a Kitchen
    """
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    banner = "%s - Getting the list of Recipes for Kitchen '%s'" % (
        get_datetime(), use_kitchen)
    click.secho(banner, fg='green')
    check_and_print(DKCloudCommandRunner.list_recipe(backend.dki, use_kitchen))
def recipe_create(backend, kitchen, name):
    """
    Create a new Recipe
    """
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    banner = "%s - Creating Recipe %s for Kitchen '%s'" % (
        get_datetime(), name, use_kitchen)
    click.secho(banner, fg='green')
    check_and_print(DKCloudCommandRunner.recipe_create(backend.dki, use_kitchen, name))
def recipe_get(backend, recipe):
    """
    Get the latest files for this recipe.
    """
    recipe_root_dir = DKRecipeDisk.find_recipe_root_dir()
    if recipe_root_dir is None:
        # Not inside a recipe folder: require an explicit recipe name and a
        # kitchen root directory to work from.
        if recipe is None:
            raise click.ClickException("\nPlease change to a recipe folder or provide a recipe name arguement")
        if not DKKitchenDisk.is_kitchen_root_dir():
            raise click.ClickException("\nPlease change to a recipe folder or a kitchen root dir.")
        recipe_name = recipe
        start_dir = DKKitchenDisk.find_kitchen_root_dir()
    else:
        # Inside a recipe folder: an explicit name, if given, must match it.
        recipe_name = DKRecipeDisk.find_recipe_name()
        if recipe is not None and recipe_name != recipe:
            raise click.ClickException("\nThe recipe name argument '%s' is inconsistent with the current directory '%s'" % (recipe, recipe_root_dir))
        start_dir = recipe_root_dir
    kitchen_name = Backend.get_kitchen_name_soft()
    click.secho("%s - Getting the latest version of Recipe '%s' in Kitchen '%s'" % (get_datetime(), recipe_name, kitchen_name), fg='green')
    check_and_print(DKCloudCommandRunner.get_recipe(backend.dki, kitchen_name, recipe_name, start_dir))
def file_add(backend, kitchen, recipe, message, filepath):
    """
    Add a newly created file to a Recipe
    """
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    if recipe is None:
        # Fall back to the recipe of the current directory.
        recipe = DKRecipeDisk.find_recipe_name()
        if recipe is None:
            raise click.ClickException('You must be in a recipe folder, or provide a recipe name.')
    click.secho('%s - Adding File (%s) to Recipe (%s) in kitchen(%s) with message (%s)' %
                (get_datetime(), filepath, recipe, use_kitchen, message), fg='green')
    check_and_print(DKCloudCommandRunner.add_file(backend.dki, use_kitchen, recipe, message, filepath))
def file_update_all(backend, message, dryrun):
    """
    Update all of the changed files for this Recipe
    """
    kitchen = DKCloudCommandRunner.which_kitchen_name()
    if kitchen is None:
        raise click.ClickException('You must be in a Kitchen')
    recipe_dir = DKRecipeDisk.find_recipe_root_dir()
    if recipe_dir is None:
        raise click.ClickException('You must be in a Recipe folder')
    recipe = DKRecipeDisk.find_recipe_name()
    # Only the verb differs between the dry-run and real banners.
    verb = 'Display' if dryrun else 'Updating'
    click.secho('%s - %s all changed files in Recipe (%s) in Kitchen(%s) with message (%s)' %
                (get_datetime(), verb, recipe, kitchen, message), fg='green')
    check_and_print(DKCloudCommandRunner.update_all_files(backend.dki, kitchen, recipe, recipe_dir, message, dryrun))
def file_resolve(backend, filepath):
    """
    Mark a conflicted file as resolved, so that a merge can be completed
    """
    if DKRecipeDisk.find_recipe_name() is None:
        raise click.ClickException('You must be in a recipe folder.')
    click.secho("%s - Resolving conflicts" % get_datetime())
    for conflicted_file in filepath:
        # Each file must exist; abort on the first one that does not.
        if not os.path.exists(conflicted_file):
            raise click.ClickException('%s does not exist' % conflicted_file)
        check_and_print(DKCloudCommandRunner.resolve_conflict(conflicted_file))
def active_serving_watcher(backend, kitchen, period):
    """
    Watches all cooking Recipes in a Kitchen
    Provide the kitchen name as an argument or be in a Kitchen folder.
    """
    # Resolve the kitchen from the argument or the current directory.
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    click.secho('%s - Watching Active OrderRun Changes in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
    # Starts the watcher on a background thread, polling every `period` seconds.
    DKCloudCommandRunner.watch_active_servings(backend.dki, use_kitchen, period)
    while True:
        try:
            # Block on the watcher thread; after each join returns, check
            # whether the watcher is still running before looping again.
            DKCloudCommandRunner.join_active_serving_watcher_thread_join()
            if not DKCloudCommandRunner.watcher_running():
                break
        except KeyboardInterrupt:
            # Ctrl-C: presumably shuts the watcher down cleanly -- confirm
            # that exit_gracefully stops the watcher thread.
            print 'KeyboardInterrupt'
            exit_gracefully(None, None)
    exit(0)
def order_delete(backend, kitchen, order_id):
    """
    Delete one order or all orders in a kitchen
    """
    use_kitchen = Backend.get_kitchen_name_soft(kitchen)
    # Fix: removed stray debug statement ('print use_kitchen') that leaked
    # the kitchen name to stdout on every invocation.
    if use_kitchen is None and order_id is None:
        raise click.ClickException('You must specify either a kitchen or an order_id or be in a kitchen directory')
    if order_id is not None:
        # A specific order id takes precedence over the kitchen.
        click.secho('%s - Delete an Order using id %s' % (get_datetime(), order_id), fg='green')
        check_and_print(DKCloudCommandRunner.delete_one_order(backend.dki, order_id))
    else:
        click.secho('%s - Delete all orders in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
        check_and_print(DKCloudCommandRunner.delete_all_order(backend.dki, use_kitchen))
def order_stop(backend, order_id):
    """
    Stop an order - Turn off the serving generation ability of an order. Stop any running jobs. Keep all state around.
    """
    if order_id is None:
        raise click.ClickException('invalid order id %s' % order_id)
    banner = '%s - Stop order id %s' % (get_datetime(), order_id)
    click.secho(banner, fg='green')
    check_and_print(DKCloudCommandRunner.stop_order(backend.dki, order_id))
def order_stop(backend, order_run_id):
    """
    Stop the run of an order - Stop the running order and keep all state around.
    """
    # NOTE(review): this shares its module-level name with the order-level
    # order_stop command; click presumably registers each under its own
    # command name, but the Python name is shadowed -- confirm intentional.
    if order_run_id is None:
        raise click.ClickException('invalid order id %s' % order_run_id)
    banner = '%s - Stop order id %s' % (get_datetime(), order_run_id)
    click.secho(banner, fg='green')
    check_and_print(DKCloudCommandRunner.stop_orderrun(backend.dki, order_run_id.strip()))
def orderrun_detail(backend, kitchen, summary, nodestatus, runstatus, log, timing, test, all_things,
                    order_id, order_run_id, disp_order_id, disp_order_run_id):
    """
    Display information about an Order-Run
    """
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    pd = dict()
    if all_things:
        pd.update(summary=True, logs=True, timingresults=True,
                  testresults=True, status=True)
    # Map each display flag to the payload key the runner expects.
    for enabled, key in ((summary, 'summary'),
                         (log, 'logs'),
                         (timing, 'timingresults'),
                         (test, 'testresults'),
                         (nodestatus, 'status'),
                         (runstatus, 'runstatus'),
                         (disp_order_id, 'disp_order_id'),
                         (disp_order_run_id, 'disp_order_run_id')):
        if enabled:
            pd[key] = True
    any_flag = (runstatus or all_things or test or timing or log or
                nodestatus or summary or disp_order_id or disp_order_run_id)
    if not any_flag:
        # If the user does not specify anything to display, show the
        # summary information.
        pd['summary'] = True
    if order_id is not None and order_run_id is not None:
        raise click.ClickException("Cannot specify both the Order Id and the OrderRun Id")
    if order_id is not None:
        pd[DKCloudCommandRunner.ORDER_ID] = order_id.strip()
    elif order_run_id is not None:
        pd[DKCloudCommandRunner.ORDER_RUN_ID] = order_run_id.strip()
    # Don't print the green banner when only machine-readable output
    # (runstatus / id display) was requested.
    if not runstatus and not disp_order_id and not disp_order_run_id:
        click.secho('%s - Display Order-Run details from kitchen %s' % (get_datetime(), use_kitchen), fg='green')
    check_and_print(DKCloudCommandRunner.orderrun_detail(backend.dki, use_kitchen, pd))
def delete_orderrun(backend, orderrun_id):
    """
    Delete the orderrun specified by the argument.
    """
    banner = '%s - Deleting orderrun %s' % (get_datetime(), orderrun_id)
    click.secho(banner, fg='green')
    cleaned_id = orderrun_id.strip()
    check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, cleaned_id))
"resource": ""
} |
def secret_list(backend, path):
    """
    List all Secrets
    """
    banner = '%s - Getting the list of secrets' % get_datetime()
    click.echo(click.style(banner, fg='green'))
    result = DKCloudCommandRunner.secret_list(backend.dki, path)
    check_and_print(result)
"resource": ""
} |
def secret_write(backend, entry):
    """
    Write a secret

    ``entry`` has the form ``path=value``. If ``value`` starts with ``@``, the
    remainder is treated as a filename whose contents become the secret value.
    """
    # Split only on the FIRST '=': secret values frequently contain '='
    # themselves (e.g. base64 padding), and a bare split('=') would raise
    # ValueError ("too many values to unpack") on such entries.
    path, value = entry.split('=', 1)
    if value.startswith('@'):
        # '@filename' means: read the secret value from that file.
        with open(value[1:]) as vfile:
            value = vfile.read()
    click.echo(click.style('%s - Writing secret' % get_datetime(), fg='green'))
    check_and_print(
        DKCloudCommandRunner.secret_write(backend.dki, path, value))
"resource": ""
} |
def readlines(self, *args, **kwargs):
    """Return list of all lines. Always returns list of unicode."""
    # Repeatedly call readline until it reports end-of-data (empty string).
    collected = []
    while True:
        line = self.readline(*args, **kwargs)
        if line == u'':
            break
        collected.append(line)
    return collected
"resource": ""
} |
def save_to(self, file):
    """Save data to file.

    Will copy by either writing out the data or using
    :func:`shutil.copyfileobj`.

    :param file: A file-like object (with a ``write`` method) or a
        filename."""
    target = file
    if not hasattr(target, 'write'):
        # Destination is a filename: open it ourselves (not via filesystem
        # copy helpers) so permissions are handled uniformly, then recurse
        # into the file-like branch.
        with open(target, 'wb') as sink:
            return self.save_to(sink)
    # Destination is file-like.
    if self.file is not None and getattr(self.file, 'encoding', None) is None:
        # Backing file is open and byte-oriented (no unicode conversion):
        # stream it straight across.
        copyfileobj(self.file, target)
    elif self.filename is not None:
        with open(self.filename, 'rb') as source:
            copyfileobj(source, target)
    else:
        # No backing file at all: materialize the bytes and write them.
        target.write(self.__bytes__())
"resource": ""
} |
def _ions(self, f):
    """
    This is a generator that returns the mzs being measured during
    each time segment, one segment at a time.

    :param f: open binary file handle for the Hapsite data file. The
        caller's file position is saved before each ``yield`` and restored
        afterwards, so the caller may keep reading the same handle between
        segments.
    :yields: list of m/z values (floats) for one time segment.
    """
    # Save the caller's position so it can be restored around our own reads.
    outside_pos = f.tell()
    # Locate the 0xFFFFFFFF + "HapsSearch" marker that follows the ion table.
    doff = find_offset(f, 4 * b'\xff' + 'HapsSearch'.encode('ascii'))
    # actual end of prev section is 34 bytes before, but assume 1 rec
    f.seek(doff - 62)
    # seek backwards, 4 bytes at a time, to find the FFFFFFFF header
    while True:
        f.seek(f.tell() - 8)
        if f.read(4) == 4 * b'\xff':
            break
    # Skip 64 bytes of header, then read the segment count (uint32 LE).
    f.seek(f.tell() + 64)
    nsegments = struct.unpack('<I', f.read(4))[0]
    for _ in range(nsegments):
        # first 32 bytes are segment name, rest are something else?
        f.seek(f.tell() + 96)
        nions = struct.unpack('<I', f.read(4))[0]
        ions = []
        for _ in range(nions):
            # Each ion record is 8 little-endian uint32s (32 bytes).
            # TODO: check that itype is actually a SIM/full scan switch
            i1, i2, _, _, _, _, itype, _ = struct.unpack('<' + 8 * 'I',
                                                         f.read(32))
            # NOTE(review): masses appear stored as centi-m/z (divide by
            # 100) -- confirm against the Hapsite format spec.
            if itype == 0:  # SIM: single target mass
                ions.append(i1 / 100.)
            else:  # full scan: expand the [i1, i2] range at 1 m/z steps
                # TODO: this might be a little hacky?
                # ideally we would need to know n for this, e.g.:
                # ions += np.linspace(i1 / 100, i2 / 100, n).tolist()
                ions += np.arange(i1 / 100., i2 / 100. + 1, 1).tolist()
        # save the file position and load the position
        # that we were at before we started this code
        inside_pos = f.tell()
        f.seek(outside_pos)
        yield ions
        outside_pos = f.tell()
        f.seek(inside_pos)
    # Leave the handle where the caller last had it.
    f.seek(outside_pos)
"resource": ""
} |
q40695 | Daemon._emit_message | train | def _emit_message(cls, message):
"""Print a message to STDOUT."""
sys.stdout.write(message)
sys.stdout.flush() | python | {
"resource": ""
} |
q40696 | Daemon._emit_error | train | def _emit_error(cls, message):
"""Print an error message to STDERR."""
sys.stderr.write('ERROR: {message}\n'.format(message=message))
sys.stderr.flush() | python | {
"resource": ""
} |
q40697 | Daemon._emit_warning | train | def _emit_warning(cls, message):
"""Print an warning message to STDERR."""
sys.stderr.write('WARNING: {message}\n'.format(message=message))
sys.stderr.flush() | python | {
"resource": ""
} |
q40698 | Daemon._setup_piddir | train | def _setup_piddir(self):
"""Create the directory for the PID file if necessary."""
if self.pidfile is None:
return
piddir = os.path.dirname(self.pidfile)
if not os.path.isdir(piddir):
# Create the directory with sensible mode and ownership
os.makedirs(piddir, 0o777 & ~self.umask)
os.chown(piddir, self.uid, self.gid) | python | {
"resource": ""
} |
q40699 | Daemon._read_pidfile | train | def _read_pidfile(self):
"""Read the PID file and check to make sure it's not stale."""
if self.pidfile is None:
return None
if not os.path.isfile(self.pidfile):
return None
# Read the PID file
with open(self.pidfile, 'r') as fp:
try:
pid = int(fp.read())
except ValueError:
self._emit_warning('Empty or broken pidfile {pidfile}; '
'removing'.format(pidfile=self.pidfile))
pid = None
if pid is not None and psutil.pid_exists(pid):
return pid
else:
# Remove the stale PID file
os.remove(self.pidfile)
return None | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.