code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def asdatetime(self, naive=True):
    """Return this datetime_tz as a plain ``datetime.datetime`` object.

    Args:
        naive: If True (the default), return the result *without* any tz
            info attached.

    Returns:
        This datetime_tz as a ``datetime.datetime`` object.
    """
    # Rebuild from the broken-down time; microseconds are not part of
    # timetuple() and must be appended separately.
    args = list(self.timetuple()[0:6]) + [self.microsecond]
    if not naive:
        args.append(self.tzinfo)
    return datetime.datetime(*args)
def _decodeSquareMetadata(stateData):
    '''
    Decode the metadata blob passed back by the Square app.

    The blob is a URL-quoted, base64-encoded JSON object.  Returns an
    empty dict for an empty blob.  Raises TypeError, ValueError, or
    binascii.Error if the blob is malformed.
    '''
    if not stateData:
        return {}
    return json.loads(b64decode(unquote(stateData).encode()).decode())


def processPointOfSalePayment(request):
    '''
    This view handles the callbacks from point-of-sale transactions.
    Please note that this will only work if you have set up your callback
    URL in Square to point to this view.

    :param request: the callback request issued by the Square app.
    :return: a redirect to the success URL, or back to the originating
        page on error.
    '''
    # Use the module logger rather than a leftover debug print().
    logger.debug('Request data is: %s' % request.GET)

    # iOS transactions put all response information in the data key:
    data = json.loads(request.GET.get('data', '{}'))

    if data:
        status = data.get('status')
        errorCode = data.get('error_code')
        errorDescription = errorCode

        try:
            metadata = _decodeSquareMetadata(data.get('state', ''))
        except (TypeError, ValueError, binascii.Error):
            logger.error('Invalid metadata passed from Square app.')
            # The template has three placeholders (message, CODE,
            # DESCRIPTION); supply all three arguments so format_html
            # does not raise IndexError.
            messages.error(
                request,
                format_html(
                    '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                    str(_('ERROR: Error with Square point of sale transaction attempt.')),
                    errorCode,
                    str(_('Invalid metadata passed from Square app.')),
                )
            )
            return HttpResponseRedirect(reverse('showRegSummary'))

        # This is the normal transaction identifier, which will be stored in
        # the database as a SquarePaymentRecord
        serverTransId = data.get('transaction_id')

        # This is the only identifier passed for non-card transactions.
        clientTransId = data.get('client_transaction_id')
    else:
        # Android transactions use this GET response syntax
        errorCode = request.GET.get('com.squareup.pos.ERROR_CODE')
        errorDescription = request.GET.get('com.squareup.pos.ERROR_DESCRIPTION')
        status = 'ok' if not errorCode else 'error'

        # This is the normal transaction identifier, which will be stored in
        # the database as a SquarePaymentRecord
        serverTransId = request.GET.get('com.squareup.pos.SERVER_TRANSACTION_ID')

        # This is the only identifier passed for non-card transactions.
        clientTransId = request.GET.get('com.squareup.pos.CLIENT_TRANSACTION_ID')

        # Load the metadata, which includes the registration or invoice ids
        try:
            metadata = _decodeSquareMetadata(
                request.GET.get('com.squareup.pos.REQUEST_METADATA', ''))
        except (TypeError, ValueError, binascii.Error):
            logger.error('Invalid metadata passed from Square app.')
            messages.error(
                request,
                format_html(
                    '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                    str(_('ERROR: Error with Square point of sale transaction attempt.')),
                    errorCode,
                    str(_('Invalid metadata passed from Square app.')),
                )
            )
            return HttpResponseRedirect(reverse('showRegSummary'))

    # Other things that can be passed in the metadata
    sourceUrl = metadata.get('sourceUrl', reverse('showRegSummary'))
    successUrl = metadata.get('successUrl', reverse('registration'))
    submissionUserId = metadata.get('userId', getattr(getattr(request, 'user', None), 'id', None))
    transactionType = metadata.get('transaction_type')
    taxable = metadata.get('taxable', False)
    addSessionInfo = metadata.get('addSessionInfo', False)
    customerEmail = metadata.get('customerEmail')

    if errorCode or status != 'ok':
        # Return the user to their original page with the error message displayed.
        logger.error(
            'Error with Square point of sale transaction attempt. '
            'CODE: %s; DESCRIPTION: %s' % (errorCode, errorDescription)
        )
        messages.error(
            request,
            format_html(
                '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                str(_('ERROR: Error with Square point of sale transaction attempt.')),
                errorCode, errorDescription
            )
        )
        return HttpResponseRedirect(sourceUrl)

    api_instance = TransactionsApi()
    api_instance.api_client.configuration.access_token = getattr(settings, 'SQUARE_ACCESS_TOKEN', '')
    location_id = getattr(settings, 'SQUARE_LOCATION_ID', '')

    if serverTransId:
        try:
            api_response = api_instance.retrieve_transaction(transaction_id=serverTransId, location_id=location_id)
        except ApiException:
            logger.error('Unable to find Square transaction by server ID.')
            messages.error(request, _('ERROR: Unable to find Square transaction by server ID.'))
            return HttpResponseRedirect(sourceUrl)
        if api_response.errors:
            logger.error('Unable to find Square transaction by server ID: %s' % api_response.errors)
            # api_response.errors is not a string; wrap in str() so the
            # concatenation does not raise TypeError.
            messages.error(request, str(_('ERROR: Unable to find Square transaction by server ID:')) + str(api_response.errors))
            return HttpResponseRedirect(sourceUrl)
        transaction = api_response.transaction
    elif clientTransId:
        # Try to find the transaction in the 50 most recent transactions
        try:
            api_response = api_instance.list_transactions(location_id=location_id)
        except ApiException:
            logger.error('Unable to find Square transaction by client ID.')
            messages.error(request, _('ERROR: Unable to find Square transaction by client ID.'))
            return HttpResponseRedirect(sourceUrl)
        if api_response.errors:
            logger.error('Unable to find Square transaction by client ID: %s' % api_response.errors)
            messages.error(request, str(_('ERROR: Unable to find Square transaction by client ID:')) + str(api_response.errors))
            return HttpResponseRedirect(sourceUrl)
        transactions_list = [x for x in api_response.transactions if x.client_id == clientTransId]
        if len(transactions_list) == 1:
            transaction = transactions_list[0]
        else:
            logger.error('Returned client transaction ID not found.')
            messages.error(request, _('ERROR: Returned client transaction ID not found.'))
            return HttpResponseRedirect(sourceUrl)
    else:
        logger.error('An unknown error has occurred with Square point of sale transaction attempt.')
        messages.error(request, _('ERROR: An unknown error has occurred with Square point of sale transaction attempt.'))
        return HttpResponseRedirect(sourceUrl)

    # Get total information from the transaction for handling invoice.
    # Amounts are reported in cents; net total = tenders - refunds.
    this_total = sum([x.amount_money.amount / 100 for x in transaction.tenders or []]) - \
        sum([x.amount_money.amount / 100 for x in transaction.refunds or []])

    # Parse if a specific submission user is indicated
    submissionUser = None
    if submissionUserId:
        try:
            submissionUser = User.objects.get(id=int(submissionUserId))
        except (ValueError, ObjectDoesNotExist):
            logger.warning('Invalid user passed, submissionUser will not be recorded.')

    if 'registration' in metadata.keys():
        try:
            tr_id = int(metadata.get('registration'))
            tr = TemporaryRegistration.objects.get(id=tr_id)
        except (ValueError, TypeError, ObjectDoesNotExist):
            logger.error('Invalid registration ID passed: %s' % metadata.get('registration'))
            messages.error(
                request,
                str(_('ERROR: Invalid registration ID passed')) + ': %s' % metadata.get('registration')
            )
            return HttpResponseRedirect(sourceUrl)
        # Give the user extra time to complete the registration session.
        tr.expirationDate = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes'))
        tr.save()
        this_invoice = Invoice.get_or_create_from_registration(tr, submissionUser=submissionUser)
        this_description = _('Registration Payment: #%s' % tr_id)
    elif 'invoice' in metadata.keys():
        try:
            this_invoice = Invoice.objects.get(id=int(metadata.get('invoice')))
            this_description = _('Invoice Payment: %s' % this_invoice.id)
        except (ValueError, TypeError, ObjectDoesNotExist):
            logger.error('Invalid invoice ID passed: %s' % metadata.get('invoice'))
            messages.error(
                request,
                str(_('ERROR: Invalid invoice ID passed')) + ': %s' % metadata.get('invoice')
            )
            return HttpResponseRedirect(sourceUrl)
    else:
        # Gift certificates automatically get a nicer invoice description
        if transactionType == 'Gift Certificate':
            this_description = _('Gift Certificate Purchase')
        else:
            this_description = transactionType
        this_invoice = Invoice.create_from_item(
            this_total,
            this_description,
            submissionUser=submissionUser,
            calculate_taxes=(taxable is not False),
            transactionType=transactionType,
        )

    paymentRecord, created = SquarePaymentRecord.objects.get_or_create(
        transactionId=transaction.id,
        locationId=transaction.location_id,
        defaults={'invoice': this_invoice, }
    )
    if created:
        # We process the payment now, and enqueue the job to retrieve the
        # transaction again once fees have been calculated by Square
        this_invoice.processPayment(
            amount=this_total,
            fees=0,
            paidOnline=True,
            methodName='Square Point of Sale',
            methodTxn=transaction.id,
            notify=customerEmail,
        )
        updateSquareFees.schedule(args=(paymentRecord,), delay=60)

    if addSessionInfo:
        paymentSession = request.session.get(INVOICE_VALIDATION_STR, {})
        paymentSession.update({
            'invoiceID': str(this_invoice.id),
            'amount': this_total,
            'successUrl': successUrl,
        })
        request.session[INVOICE_VALIDATION_STR] = paymentSession

    return HttpResponseRedirect(successUrl)
def create_preauth(byval, key, by='name', expires=0, timestamp=None):
    """ Generates a zimbra preauth value

    :param byval: The value of the targeted user (according to the
      by-parameter). For example: The account name, if "by" is "name".
    :param key: The domain preauth key (you can retrieve that using zmprov gd)
    :param by: What type is the byval-parameter? Valid parameters are "name"
      (default), "id" and "foreignPrincipal"
    :param expires: Milliseconds when the auth token expires. Defaults to 0
      for default account expiration
    :param timestamp: Current timestamp in milliseconds (is calculated by
      default)
    :returns: The preauth value to be used in an AuthRequest
    :rtype: str
    """
    if timestamp is None:
        # Current Unix time in milliseconds.  datetime.timestamp() is
        # portable; strftime("%s") is a platform-specific extension that
        # is not available everywhere (e.g. Windows).
        timestamp = int(datetime.now().timestamp()) * 1000

    # HMAC-SHA1 over "byval|by|expires|timestamp" keyed with the domain
    # preauth key, hex-encoded.
    pak = hmac.new(
        codecs.latin_1_encode(key)[0],
        ('%s|%s|%s|%s' % (
            byval,
            by,
            expires,
            timestamp
        )).encode("utf-8"),
        hashlib.sha1
    ).hexdigest()

    return pak
def proc_file(infile: str, outfile: str, opts: Namespace) -> bool:
    """
    Convert a single FHIR JSON file to RDF.

    :param infile: input file to be processed
    :param outfile: target output file.
    :param opts: options namespace (uribase, graph, noontology,
        nocontinuation, nonarrative, fhir_metavoc, ...)
    :return: True if the file was a FHIR collection or resource and was
        processed, False otherwise.
    """
    # NOTE(review): the other flags are negated ("not opts.no...") but
    # nonarrative is passed through directly -- confirm this is intended.
    g = fhir_json_to_rdf(infile, opts.uribase, opts.graph, add_ontology_header=not opts.noontology,
                         do_continuations=not opts.nocontinuation, replace_narrative_text=bool(opts.nonarrative),
                         metavoc=opts.fhir_metavoc)

    # If we aren't carrying graph in opts, we're doing a file by file
    # transformation and serialize immediately.
    if g:
        if not opts.graph:
            serialize_graph(g, outfile, opts)
        return True
    else:
        print("{} : Not a FHIR collection or resource".format(infile))
        return False
def transform_annotation(self, ann, duration):
    '''Apply the vector transformation.

    Parameters
    ----------
    ann : jams.Annotation
        The input annotation

    duration : number > 0
        The duration of the track (unused here)

    Returns
    -------
    data : dict
        data['vector'] : np.ndarray, shape=(dimension,)

    Raises
    ------
    DataError
        If the input dimension does not match
    '''
    # Only the observation values are used; intervals are discarded.
    _, values = ann.to_interval_values()
    vector = np.asarray(values[0], dtype=self.dtype)
    if len(vector) != self.dimension:
        # The original '{:0}'/'{:1}' were format *specs*, not argument
        # indices; plain placeholders render both values correctly.
        raise DataError('vector dimension({}) '
                        '!= self.dimension({})'
                        .format(len(vector), self.dimension))

    return {'vector': vector}
def writes(mdict, filename='data.h5', truncate_existing=False,
           truncate_invalid_matlab=False, options=None, **keywords):
    """ Writes data into an HDF5 file (high level).

    High level function to store one or more Python types (data) to
    specified paths in an HDF5 file. The paths are specified as POSIX
    style paths where the directory name is the Group to put it in and
    the basename is the name to write it to.

    Options can be passed as an already constructed ``Options`` into
    `options` or as additional keywords that will be used to make one by
    ``options = Options(**keywords)``.  Two very important options are
    ``store_python_metadata`` (write enough metadata to read the data
    back accurately) and ``matlab_compatible`` (convert/transpose data
    and set the HDF5 header so MATLAB will recognize the file).

    Paths can be ``str``/``bytes`` or iterables of parts, which are
    escaped with ``utilities.escape_path``; a direct path is assumed to
    be already escaped.

    Parameters
    ----------
    mdict : dict, dict like
        Mapping of POSIX style paths to the data to write at each path.
    filename : str, optional
        The name of the HDF5 file to write `data` to.
    truncate_existing : bool, optional
        Whether to truncate the file if it already exists before writing
        to it.
    truncate_invalid_matlab : bool, optional
        Whether to truncate a file if matlab_compatibility is being
        done and the file doesn't have the proper header (userblock in
        HDF5 terms) setup for MATLAB metadata to be placed.
    options : Options, optional
        The options to use when writing. Is mutually exclusive with any
        additional keyword arguments given (set to ``None`` or don't
        provide to use them).
    **keywords :
        If `options` was not provided or was ``None``, these are used as
        arguments to make a ``Options``.

    Raises
    ------
    TypeError
        If a path is of an invalid type.
    NotImplementedError
        If writing `data` is not supported.
    exceptions.TypeNotMatlabCompatibleError
        If writing a type not compatible with MATLAB and
        `options.action_for_matlab_incompatible` is set to ``'error'``.

    See Also
    --------
    utilities.process_path
    utilities.escape_path
    write : Writes just a single piece of data
    reads
    read
    Options
    utilities.write_data : Low level version
    """
    # Pack the different options into an Options class if an Options was
    # not given.
    if not isinstance(options, Options):
        options = Options(**keywords)

    # Go through mdict, extract the paths and data, and process the
    # paths. A list of tuples for each piece of data to write will be
    # constructed where the first element is the group name, the second
    # the target name (name of the Dataset/Group holding the data), and
    # the third element the data to write.
    towrite = []
    for p, v in mdict.items():
        groupname, targetname = utilities.process_path(p)
        towrite.append((groupname, targetname, v))

    # Open/create the hdf5 file but don't write the data yet since the
    # userblock still needs to be set. If the file doesn't already exist
    # or we are truncating, open with truncation and a 512 byte
    # userblock (the smallest size, reserved for possible future MATLAB
    # headers). Otherwise open for read/write; if we are doing matlab
    # compatibility, the userblock is too small, and
    # truncate_invalid_matlab is set, re-open with truncation.
    f = None
    try:
        if truncate_existing or not os.path.isfile(filename):
            f = h5py.File(filename, mode='w', userblock_size=512)
        else:
            # Explicit append mode: recent h5py versions require a mode
            # and default to read-only when none is given.
            f = h5py.File(filename, mode='a')
            if options.matlab_compatible and truncate_invalid_matlab \
                    and f.userblock_size < 128:
                f.close()
                f = h5py.File(filename, mode='w', userblock_size=512)
    finally:
        # If the hdf5 file was opened at all, get the userblock size and
        # close it since we need to write the userblock directly.
        if isinstance(f, h5py.File):
            userblock_size = f.userblock_size
            f.close()
        else:
            raise IOError('Unable to create or open file.')

    # If we are doing MATLAB formatting and there is a sufficiently
    # large userblock, write the new userblock.
    if options.matlab_compatible and userblock_size >= 128:
        now = datetime.datetime.now()

        # Construct the leading string. It mimics MATLAB's own header,
        # with the Platform field replaced by the hdf5storage version.
        s = 'MATLAB 7.3 MAT-file, Platform: hdf5storage ' \
            + __version__ + ', Created on: ' \
            + now.strftime('%a %b %d %H:%M:%S %Y') \
            + ' HDF5 schema 1.00 .'

        # Make the bytearray while padding with spaces up to 128-12
        # (the minus 12 is there since the last 12 bytes are special).
        b = bytearray(s + (128-12-len(s))*' ', encoding='utf-8')

        # Add 8 nulls (0) and the magic number (or something) that
        # MATLAB uses.
        b.extend(bytearray.fromhex('00000000 00000000 0002494D'))

        # Write it to the beginning of the file; the context manager
        # guarantees the descriptor is closed even on error.
        with open(filename, 'r+b') as fd:
            fd.write(b)

    # Open the hdf5 file again and write the data, making the Group if
    # necessary. The try/finally makes sure the file is closed if any
    # errors happen (the error propagates).
    f = None
    try:
        f = h5py.File(filename, mode='a')

        # Go through each element of towrite and write them.
        for groupname, targetname, data in towrite:
            # Need to make sure groupname is a valid group in f and grab
            # its handle to pass on to the low level function.
            if groupname not in f:
                grp = f.require_group(groupname)
            else:
                grp = f[groupname]

            # Hand off to the low level function.
            utilities.write_data(f, grp, targetname, data,
                                 None, options)
    finally:
        if isinstance(f, h5py.File):
            f.close()
def _translate_nd(self,
                  source: mx.nd.NDArray,
                  source_length: int,
                  restrict_lexicon: Optional[lexicon.TopKLexicon],
                  raw_constraints: List[Optional[constrained.RawConstraintList]],
                  raw_avoid_list: List[Optional[constrained.RawConstraintList]],
                  max_output_lengths: mx.nd.NDArray) -> List[Translation]:
    """
    Translates source of source_length, given a bucket_key.

    :param source: Source ids. Shape: (batch_size, bucket_key, num_factors).
    :param source_length: Bucket key.
    :param restrict_lexicon: Lexicon to use for vocabulary restriction.
    :param raw_constraints: A list of optional constraint lists.
    :param raw_avoid_list: A list of optional constraint lists to avoid
        (same structure as ``raw_constraints``).
    :param max_output_lengths: Per-sentence maximum output lengths.
    :return: Sequence of translations.
    """
    # Run beam search and unpack its outputs directly into the
    # best-hypothesis extraction.
    return self._get_best_from_beam(*self._beam_search(source,
                                                       source_length,
                                                       restrict_lexicon,
                                                       raw_constraints,
                                                       raw_avoid_list,
                                                       max_output_lengths))
def tasks(self, name):
    '''Get all the tasks that match a name.

    :param name: the name to look up
    :return: a list of matching tasks; if the lookup yields a ``Shovel``
        container, its contained tasks are returned instead.
    '''
    found = self[name]
    if isinstance(found, Shovel):
        # A Shovel holds multiple tasks keyed by name; flatten to values.
        return [v for _, v in found.items()]
    return [found]
def any(self, predicate=None):
    '''Determine if the source sequence contains any elements which satisfy
    the predicate.

    Only enough of the sequence to satisfy the predicate once is consumed.

    Note: This method uses immediate execution.

    Args:
        predicate: An optional single argument function used to test each
            element. If omitted, or None, this method returns True if there
            is at least one element in the source.

    Returns:
        True if the sequence contains at least one element which satisfies
        the predicate, otherwise False.

    Raises:
        ValueError: If the Queryable is closed()
        TypeError: If predicate is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call any() on a closed Queryable.")

    if predicate is None:
        predicate = lambda x: True

    if not is_callable(predicate):
        raise TypeError("any() parameter predicate={predicate} is not callable".format(predicate=repr(predicate)))

    # select() maps the predicate lazily; stop at the first truthy result
    # so no more of the source is consumed than necessary.
    for item in self.select(predicate):
        if item:
            return True
    return False
def get_bare_quoted_string(value):
    """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE

    A quoted-string without the leading or trailing white space.  Its
    value is the text between the quote marks, with whitespace
    preserved and quoted pairs decoded.
    """
    if value[0] != '"':
        raise errors.HeaderParseError(
            "expected '\"' but found '{}'".format(value))
    bare_quoted_string = BareQuotedString()
    # Skip the opening DQUOTE.
    value = value[1:]
    # Consume folding-whitespace / qcontent tokens up to the closing quote.
    while value and value[0] != '"':
        if value[0] in WSP:
            token, value = get_fws(value)
        else:
            token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    if not value:
        # Ran off the end of the header without a closing DQUOTE; record
        # the defect rather than raising.
        bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
            "end of header inside quoted string"))
        return bare_quoted_string, value
    # Skip the closing DQUOTE.
    return bare_quoted_string, value[1:]
def iau2000b(jd_tt):
    """Compute Earth nutation based on the faster IAU 2000B nutation model.

    `jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats

    Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of
    a micro-arcsecond.  Each is either a float, or a NumPy array with
    the same dimensions as the input argument.  The result will not take
    as long to compute as the full IAU 2000A series, but should still
    agree with ``iau2000a()`` to within a milliarcsecond between the
    years 1995 and 2020.
    """
    # Fixed planetary contributions, in tenths of a micro-arcsecond.
    dpplan = -0.000135 * 1e7
    deplan = 0.000388 * 1e7

    # Julian centuries of TT since J2000.0.
    t = (jd_tt - T0) / 36525.0

    # Fundamental (Delaunay) arguments, reduced to radians.
    # TODO: can these be replaced with fa0 and f1?
    el = fmod(485868.249036 +
              t * 1717915923.2178, ASEC360) * ASEC2RAD
    elp = fmod(1287104.79305 +
               t * 129596581.0481, ASEC360) * ASEC2RAD
    f = fmod(335779.526232 +
             t * 1739527262.8478, ASEC360) * ASEC2RAD
    d = fmod(1072260.70369 +
             t * 1602961601.2090, ASEC360) * ASEC2RAD
    om = fmod(450160.398036 -
              t * 6962890.5431, ASEC360) * ASEC2RAD

    # Only the first 77 luni-solar terms of the full series are summed.
    a = array((el, elp, f, d, om))
    arg = nals_t[:77].dot(a)
    fmod(arg, tau, out=arg)

    sarg = sin(arg)
    carg = cos(arg)

    stsc = array((sarg, t * sarg, carg)).T
    ctcs = array((carg, t * carg, sarg)).T

    dp = tensordot(stsc, lunisolar_longitude_coefficients[:77,])
    de = tensordot(ctcs, lunisolar_obliquity_coefficients[:77,])

    dpsi = dpplan + dp
    deps = deplan + de
    return dpsi, deps
def satisfaction_rating_show(self, id, **kwargs):
    """Show a single satisfaction rating.

    https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#show-satisfaction-rating

    :param id: the satisfaction rating id (shadows the builtin ``id``;
        kept for API compatibility)
    :param kwargs: extra arguments forwarded to ``self.call``
    """
    api_path = "/api/v2/satisfaction_ratings/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
def Message(msg, id=260, ok=None):
    """Original doc: Display a MESSAGE string.

    Return when the user clicks the OK button or presses Return.
    The MESSAGE string can be at most 255 characters long.

    :param msg: the message text to display
    :param id: unused; retained for backward compatibility with the
        original EasyDialogs-style signature (shadows builtin ``id``)
    :param ok: caption for the OK button
    """
    return psidialogs.message(message=msg, ok=ok)
def configure(self, ns, mappings=None, **kwargs):
    """
    Apply mappings to a namespace.

    :param ns: the namespace to configure
    :param mappings: optional dict of operation name -> definition
    :param kwargs: additional operation -> definition pairs, merged over
        ``mappings``
    """
    # Merge into a fresh dict so the caller's mappings dict is not
    # mutated by the kwargs update.
    combined = dict(mappings) if mappings is not None else dict()
    combined.update(kwargs)
    for operation, definition in combined.items():
        try:
            configure_func = self._find_func(operation)
        except AttributeError:
            # Unknown operations are silently skipped.
            pass
        else:
            configure_func(ns, self._make_definition(definition))
def fix_hp_addrs(server):
    """
    Works around hpcloud's peculiar "all ip addresses are returned as private
    even though one is public" bug. This is also what the official hpfog gem
    does in the ``Fog::Compute::HP::Server#public_ip_address`` method.

    :param dict server: Contains the server ID, a list of public IP addresses,
        and a list of private IP addresses.
    :return: dict with the server ID and, when "private" addresses were
        reported, the second one reclassified as public.
    """
    fixed = {A.server.ID: server[A.server.ID]}
    both = server.get(A.server.PRIVATE_IPS)
    if both:
        # hpcloud reports [private, public] inside the private list; split
        # them back out.  NOTE(review): assumes at least two entries when
        # the list is non-empty -- confirm upstream guarantees this.
        fixed[A.server.PUBLIC_IPS] = [both[1]]
        fixed[A.server.PRIVATE_IPS] = [both[0]]
    return fixed
def initialize(self, configfile=None):
    """
    Initialize the remote module.

    Sends an "initialize" message (carrying the config file path as
    metadata) over the ZMQ socket and blocks for the reply.

    :param configfile: path to the configuration file, or None
    """
    method = "initialize"
    A = None  # no array payload accompanies this request
    metadata = {method: configfile}
    send_array(self.socket, A, metadata)
    # Wait for the acknowledgement; the reply content is discarded.
    A, metadata = recv_array(
        self.socket, poll=self.poll, poll_timeout=self.poll_timeout,
        flags=self.zmq_flags)
def auth(self, request):
    """
    Authenticate the user to the Wallabag service.

    :param request: request object
    :return: callback url
    :rtype: string that contains the url to redirect after auth
    """
    service = UserService.objects.get(user=request.user, name='ServiceWallabag')
    callback_url = '%s://%s%s' % (request.scheme, request.get_host(), reverse('wallabag_callback'))
    params = {'username': service.username,
              'password': service.password,
              'client_id': service.client_id,
              'client_secret': service.client_secret}
    # Fetch an OAuth token from the Wallabag host and stash it in the
    # session for the callback view to pick up.
    access_token = Wall.get_token(host=service.host, **params)
    request.session['oauth_token'] = access_token
    return callback_url
def display(self):
    """A unicode value with the object's data, to be used for displaying
    the object in your application."""
    if self.degree and self.school:
        disp = self.degree + u' from ' + self.school
    else:
        disp = self.degree or self.school or None
    if disp is not None and self.date_range is not None:
        # date_range.years_range is a pair of integer years.
        disp += u' (%d-%d)' % self.date_range.years_range
    # Fall back to the empty string when nothing is set.
    return disp or u''
def run(self, files, stack):
    """Clean your text.

    Sanitizes each post's ``content`` in place with bleach, using the
    args/kwargs configured on this plugin instance.

    :param files: mapping of filename -> post object
    :param stack: unused here; part of the plugin run() interface
    """
    for filename, post in files.items():
        post.content = self.bleach.clean(post.content, *self.args, **self.kwargs)
def analyze(self):
    """ Run an analysis on all of the items in this library section. See
        See :func:`~plexapi.base.PlexPartialObject.analyze` for more details.
    """
    # PUT (not GET) triggers the server-side analyze job for the section.
    key = '/library/sections/%s/analyze' % self.key
    self._server.query(key, method=self._server._session.put)
See :func:`~plexapi.base.PlexPartialObject.analyze` for more details. |
def get_variant_by_name(self, name, variant_info=None):
    """Get the genotype of a marker using it's name.

    Args:
        name (str): The name of the marker.
        variant_info (pandas.Series): The marker information (e.g. seek).
            When provided, the index lookup is skipped entirely.

    Returns:
        list: A list of Genotypes (only one for PyPlink, see note below).
            Empty list when the marker is unknown.

    Note
    ====
        From PyPlink version 1.3.2 and onwards, each name is unique in the
        dataset. Hence, we can use the 'get_geno_marker' function and be
        sure only one variant is returned.
    """
    # From 1.3.2 onwards, PyPlink sets unique names.
    if not self.has_index:
        raise NotImplementedError("Not implemented when IMPUTE2 file is "
                                  "not indexed (see genipe)")
    # Getting the seek position
    if variant_info is None:
        try:
            variant_info = self._impute2_index.loc[name, :]
        except KeyError:
            if name in self.get_duplicated_markers():
                # The variant is a duplicated one, so we go through all the
                # variants with the same name and the :dupx suffix
                return [
                    self.get_variant_by_name(dup_name).pop()
                    for dup_name in self.get_duplicated_markers()[name]
                ]
            else:
                # The variant is not in the index
                # NOTE(review): 'logging' here appears to be a project
                # module with a custom helper, not the stdlib -- confirm.
                logging.variant_name_not_found(name)
                return []
    # Seeking to the right place in the file
    self._impute2_file.seek(variant_info.seek)
    # Parsing the file
    genotypes = self._parse_impute2_line(self._impute2_file.readline())
    # Fixing the object
    self._fix_genotypes_object(genotypes, variant_info)
    return [genotypes]
Args:
name (str): The name of the marker.
variant_info (pandas.Series): The marker information (e.g. seek).
Returns:
list: A list of Genotypes (only one for PyPlink, see note below).
Note
====
From PyPlink version 1.3.2 and onwards, each name is unique in the
dataset. Hence, we can use the 'get_geno_marker' function and be
sure only one variant is returned. |
def scan_temperature(self, measure, temperature, rate, delay=1):
    """Performs a temperature scan.

    Measures until the target temperature is reached.

    :param measure: A callable called repeatedly until stability at target
        temperature is reached.
    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each call to measure in seconds.
    :raises TypeError: if `measure` is not callable.
    """
    if not hasattr(measure, '__call__'):
        raise TypeError('measure parameter not callable.')
    # Start the sweep without blocking so we can measure while ramping.
    self.set_temperature(temperature, rate, 'no overshoot', wait_for_stability=False)
    start = datetime.datetime.now()
    while True:
        # The PPMS needs some time to update the status code, we therefore ignore it for 10s.
        if (self.system_status['temperature'] == 'normal stability at target temperature' and
            (datetime.datetime.now() - start > datetime.timedelta(seconds=10))):
            break
        measure()
        time.sleep(delay)
Measures until the target temperature is reached.
:param measure: A callable called repeatedly until stability at target
temperature is reached.
:param temperature: The target temperature in kelvin.
:param rate: The sweep rate in kelvin per minute.
:param delay: The time delay between each call to measure in seconds. |
def fullinfo_get(self, tids, session, fields=[]):
    '''taobao.topats.trades.fullinfo.get -- asynchronously fetch full
    details for a batch of trade orders.

    Usage guide: http://open.taobao.com/dev/index.php/ATS%E4%BD%BF%E7%94%A8%E6%8C%87%E5%8D%97

    - 1. Provides asynchronous batch retrieval of order details.
    - 2. A single call supports at most 40 orders.
    - 3. Submitting a task performs a preliminary validation; on success
         the task id and creation time are returned, otherwise an error
         is raised.
    - 4. You may receive Taobao's task-completion notification, or poll
         for the result later via taobao.topats.result.get.
    - 5. The completion notification has the form
         {"task":{"task_id":123456,"created":"2010-8-19"}}'''
    request = TOPRequest('taobao.topats.trades.fullinfo.get')
    request['tids'] = tids
    # Default to every field of the Trade model when none are requested.
    # NOTE(review): the fields=[] mutable default is never mutated here,
    # so the shared list is harmless.
    if not fields:
        trade = Trade()
        fields = trade.fields
    request['fields'] = fields
    self.create(self.execute(request, session)['task'])
    return self
使用指南:http://open.taobao.com/dev/index.php/ATS%E4%BD%BF%E7%94%A8%E6%8C%87%E5%8D%97
- 1.提供异步批量获取订单详情功能
- 2.一次调用最多支持40个订单
- 3.提交任务会进行初步任务校验,如果成功会返回任务号和创建时间,如果失败就报错
- 4.可以接收淘宝发出的任务完成消息,也可以过一段时间来取结果。获取结果接口为taobao.topats.result.get
- 5.此api执行完成发送的通知消息格式为{"task":{"task_id":123456,"created":"2010-8-19"}}果 |
def blocking_start(self, waiting_func=None):
    """this function starts the task manager running to do tasks.  The
    waiting_func is normally used to do something while other threads
    are running, but here we don't have other threads.  So the waiting
    func will never get called.  I can see wanting this function to be
    called at least once after the end of the task loop."""
    # NOTE(review): both self.logger and self.config.logger are used
    # below -- presumably the same logger; confirm and unify.
    self.logger.debug('threadless start')
    try:
        for job_params in self._get_iterator():  # may never raise
                                                 # StopIteration
            self.config.logger.debug('received %r', job_params)
            self.quit_check()
            if job_params is None:
                # an empty queue either ends the loop or idles, per config
                if self.config.quit_on_empty_queue:
                    raise KeyboardInterrupt
                self.logger.info("there is nothing to do.  Sleeping "
                                 "for %d seconds" %
                                 self.config.idle_delay)
                self._responsive_sleep(self.config.idle_delay)
                continue
            self.quit_check()
            # job_params may be (args, kwargs) or just args
            try:
                args, kwargs = job_params
            except ValueError:
                args = job_params
                kwargs = {}
            try:
                self.task_func(*args, **kwargs)
            except Exception:
                # one failing job must not kill the whole loop
                self.config.logger.error("Error in processing a job",
                                         exc_info=True)
    except KeyboardInterrupt:
        self.logger.debug('queuingThread gets quit request')
    finally:
        self.quit = True
        self.logger.debug("ThreadlessTaskManager dies quietly")
waiting_func is normally used to do something while other threads
are running, but here we don't have other threads. So the waiting
func will never get called. I can see wanting this function to be
called at least once after the end of the task loop. |
def read_namespaced_service(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_service  # noqa: E501

    read the specified Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_service(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Service (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported.  Export strips fields that a user can not specify.
    :return: V1Service
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Whether async_req is set or not, the delegate returns the right
    # thing (a thread or the data), so a single call covers both paths.
    return self.read_namespaced_service_with_http_info(name, namespace, **kwargs)  # noqa: E501
read the specified Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Service
If the method is called asynchronously,
returns the request thread. |
def register_download_command(self, download_func):
    """
    Add 'download' command for downloading a project to a directory.

    For non empty directories it will download remote files replacing local files.

    :param download_func: function to run when user choses this option
    """
    # BUG FIX: user-facing help text read "remote remote project"
    description = "Download the contents of a remote project to a local folder."
    download_parser = self.subparsers.add_parser('download', description=description)
    add_project_name_or_id_arg(download_parser, help_text_suffix="download")
    _add_folder_positional_arg(download_parser)
    # --include and --exclude filters are mutually exclusive
    include_or_exclude = download_parser.add_mutually_exclusive_group(required=False)
    _add_include_arg(include_or_exclude)
    _add_exclude_arg(include_or_exclude)
    download_parser.set_defaults(func=download_func)
For non empty directories it will download remote files replacing local files.
:param download_func: function to run when user choses this option |
def make_date(df: DataFrame, date_field: str):
    "Make sure `df[date_field]` is of the right date type (converts in place)."
    field_dtype = df[date_field].dtype
    # tz-aware columns are a distinct dtype but already hold datetimes
    if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        field_dtype = np.datetime64
    if not np.issubdtype(field_dtype, np.datetime64):
        # NOTE: infer_datetime_format was dropped -- it is deprecated since
        # pandas 2.0 (and later removed); format inference is the default.
        df[date_field] = pd.to_datetime(df[date_field])
def has_firewall(vlan):
    """Helper to determine whether or not a VLAN has a firewall.

    :param dict vlan: A dictionary representing a VLAN
    :returns: True if the VLAN has a firewall, false if it doesn't.
    """
    # Any truthy firewall-related attribute counts.
    firewall_keys = (
        'dedicatedFirewallFlag',
        'highAvailabilityFirewallFlag',
        'firewallInterfaces',
        'firewallNetworkComponents',
        'firewallGuestNetworkComponents',
    )
    return any(vlan.get(key) for key in firewall_keys)
:param dict vlan: A dictionary representing a VLAN
:returns: True if the VLAN has a firewall, false if it doesn't. |
def MiddleClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
    """
    Simulate mouse middle click at point x, y.
    x: int, screen x coordinate in pixels.
    y: int, screen y coordinate in pixels.
    waitTime: float, seconds to sleep after the click completes.
    """
    SetCursorPos(x, y)
    screenWidth, screenHeight = GetScreenSize()
    # mouse_event absolute coordinates are normalized to 0..65535
    mouse_event(MouseEventFlag.MiddleDown | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0)
    # brief press-release gap so the click registers as a real click
    time.sleep(0.05)
    mouse_event(MouseEventFlag.MiddleUp | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0)
    time.sleep(waitTime)
x: int.
y: int.
waitTime: float. |
def is_parent_of_catalog(self, id_, catalog_id):
    """Tests if an ``Id`` is a direct parent of a catalog.

    arg:    id (osid.id.Id): an ``Id``
    arg:    catalog_id (osid.id.Id): the ``Id`` of a catalog
    return: (boolean) - ``true`` if this ``id`` is a parent of
            ``catalog_id,``  ``false`` otherwise
    raise:  NotFound - ``catalog_id`` is not found
    raise:  NullArgument - ``id`` or ``catalog_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_parent_of_bin
    if self._catalog_session is not None:
        # delegate when a catalog session is available
        return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=catalog_id)
    # note the argument swap: hierarchy asks "is id_ a parent of catalog_id"
    return self._hierarchy_session.is_parent(id_=catalog_id, parent_id=id_)
arg: id (osid.id.Id): an ``Id``
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
return: (boolean) - ``true`` if this ``id`` is a parent of
``catalog_id,`` ``false`` otherwise
raise: NotFound - ``catalog_id`` is not found
raise: NullArgument - ``id`` or ``catalog_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
def sign_with_privkey(
    digest: bytes,
    privkey: Ed25519PrivateKey,
    global_pubkey: Ed25519PublicPoint,
    nonce: int,
    global_commit: Ed25519PublicPoint,
) -> Ed25519Signature:
    """Create a CoSi signature of `digest` with the supplied private key.

    This function needs to know the global public key and global commitment.

    Returns only the scalar part S of the cosigner's contribution; the
    aggregate R comes from the global commitment.
    """
    h = _ed25519.H(privkey)
    # clamped secret scalar derived from the private key hash
    a = _ed25519.decodecoord(h)
    # Schnorr-style response: S = r + H(R || A || M) * a  (mod group order l)
    S = (nonce + _ed25519.Hint(global_commit + global_pubkey + digest) * a) % _ed25519.l
    return Ed25519Signature(_ed25519.encodeint(S))
This function needs to know the global public key and global commitment. |
def tran_hash(self, a, b, c, n):
    """Implementation of the tran53 hash function.

    Mixes three input bytes through the TRAN substitution table,
    parameterised by the accumulator index ``n``; result is one byte.
    """
    mixed = TRAN[(a + n) & 255] ^ TRAN[b] * (n + n + 1)
    mixed += TRAN[c ^ TRAN[n]]
    return mixed & 255
def options(self, request, *args, **kwargs):
    """
    Handles responding to requests for the OPTIONS HTTP verb

    Returns an empty 200 response whose ``Allow`` header lists the
    methods this view supports.
    """
    response = HttpResponse()
    response['Allow'] = ', '.join(self.allowed_methods)
    # empty body, so advertise a zero content length explicitly
    response['Content-Length'] = 0
    return response
def hull(script, reorient_normal=True):
    """ Calculate the convex hull with Qhull library

    http://www.qhull.org/html/qconvex.htm

    The convex hull of a set of points is the boundary of the minimal convex
    set containing the given non-empty finite set of points.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        reorient_normal (bool): Re-orient all faces coherentely after hull
            operation.

    Returns:
        None

    Layer stack:
        Creates 1 new layer 'Convex Hull'
        Current layer is changed to new layer

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # MeshLab expects lowercase 'true'/'false' in the filter XML
    filter_xml = ''.join([
        '  <filter name="Convex Hull">\n',
        '    <Param name="reorient" ',
        'value="{}" '.format(str(reorient_normal).lower()),
        'description="Re-orient all faces coherentely" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    # only a FilterScript object tracks the layer stack; filenames don't
    if isinstance(script, FilterScript):
        script.add_layer('Convex Hull')
    return None
http://www.qhull.org/html/qconvex.htm
The convex hull of a set of points is the boundary of the minimal convex
set containing the given non-empty finite set of points.
Args:
script: the FilterScript object or script filename to write
the filter to.
reorient_normal (bool): Re-orient all faces coherentely after hull
operation.
Layer stack:
Creates 1 new layer 'Convex Hull'
Current layer is changed to new layer
MeshLab versions:
2016.12
1.3.4BETA |
def h2o_mean_absolute_error(y_actual, y_predicted, weights=None):
    """
    Mean absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: mean absolute error loss (best is 0.0).

    NOTE(review): the ``weights`` parameter is accepted but never used in
    the computation below -- the result is always unweighted; confirm
    whether weighting was intended.
    """
    # validates that actual/predicted frames are comparable
    ModelBase._check_targets(y_actual, y_predicted)
    return _colmean((y_predicted - y_actual).abs())
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:param weights: (Optional) sample weights
:returns: mean absolute error loss (best is 0.0). |
def needle(reads):
    """
    Run a Needleman-Wunsch alignment and return the two sequences.

    @param reads: An iterable of two reads.
    @return: A C{Reads} instance with the two aligned sequences.

    The temporary working directory is always removed, even when writing
    the inputs or running the EMBOSS 'needle' binary fails.
    """
    from tempfile import mkdtemp
    from shutil import rmtree
    dir = mkdtemp()
    try:
        file1 = join(dir, 'file1.fasta')
        with open(file1, 'w') as fp:
            print(reads[0].toString('fasta'), end='', file=fp)
        file2 = join(dir, 'file2.fasta')
        with open(file2, 'w') as fp:
            print(reads[1].toString('fasta'), end='', file=fp)
        out = join(dir, 'result.fasta')
        Executor().execute("needle -asequence '%s' -bsequence '%s' -auto "
                           "-outfile '%s' -aformat fasta" % (
                               file1, file2, out))
        # Use 'list' in the following to force reading the FASTA from disk
        # before the temporary directory disappears.
        result = Reads(list(FastaReads(out)))
    finally:
        # BUG FIX: previously the temp directory leaked if any step raised
        rmtree(dir)
    return result
@param reads: An iterable of two reads.
@return: A C{Reads} instance with the two aligned sequences. |
def db_check(name,
             table=None,
             **connection_args):
    '''
    Checks the full database or just a given table

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.db_check dbname
        salt '*' mysql.db_check dbname dbtable
    '''
    ret = []
    if table is None:
        # we need to check all tables
        tables = db_tables(name, **connection_args)
        for table in tables:
            # BUG FIX: log arguments were reversed (db shown as the table)
            log.info('Checking table \'%s\' in db \'%s\'..', table, name)
            ret.append(__check_table(name, table, **connection_args))
    else:
        log.info('Checking table \'%s\' in db \'%s\'..', table, name)
        ret = __check_table(name, table, **connection_args)
    return ret
CLI Example:
.. code-block:: bash
salt '*' mysql.db_check dbname
salt '*' mysql.db_check dbname dbtable |
def dependency(app_label, model):
    """
    Returns a Django 1.7+ style dependency tuple for inclusion in
    migration.dependencies[]

    :param app_label: the Django application label, e.g. ``'auth'``
    :param model: the model name within that app
    """
    from django.db.migrations import swappable_dependency
    # swappable_dependency handles models that may be swapped via settings
    return swappable_dependency(get_model_name(app_label, model))
migration.dependencies[] |
def rmtree(path):
    """Remove the given directory tree recursively.

    :note: we use shutil rmtree but adjust its behaviour to see whether files that
        couldn't be deleted are read-only. Windows will not remove them in that case"""
    def onerror(func, path, exc_info):
        # Is the error an access error ?
        # strip the read-only bit, then retry the failing operation
        os.chmod(path, stat.S_IWUSR)
        try:
            func(path)  # Will scream if still not possible to delete.
        except Exception as ex:
            if HIDE_WINDOWS_KNOWN_ERRORS:
                raise SkipTest("FIXME: fails with: PermissionError\n  %s", ex)
            else:
                raise
    return shutil.rmtree(path, False, onerror)
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case |
def add_parameters(self,template_file,in_file=None,pst_path=None):
    """ add new parameters to a control file

    Parameters
    ----------
    template_file : str
        template file
    in_file : str(optional)
        model input file. If None, template_file.replace('.tpl','') is used
    pst_path : str(optional)
        the path to append to the template_file and in_file in the control file.  If
        not None, then any existing path in front of the template or in file is split off
        and pst_path is prepended.  Default is None

    Returns
    -------
    new_par_data : pandas.DataFrame
        the data for the new parameters that were added. If no new parameters are in the
        new template file, returns None

    Note
    ----
    populates the new parameter information with default values

    """
    assert os.path.exists(template_file),"template file '{0}' not found".format(template_file)
    assert template_file != in_file
    # get the parameter names in the template file
    parnme = pst_utils.parse_tpl_file(template_file)

    # find "new" parameters that are not already in the control file
    # NOTE(review): 'in' on a pandas Series tests the index -- presumably
    # parnme is also the index of parameter_data; confirm.
    new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]

    if len(new_parnme) == 0:
        warnings.warn("no new parameters found in template file {0}".format(template_file),PyemuWarning)
        new_par_data = None
    else:
        # extend pa
        # rameter_data
        new_par_data = pst_utils.populate_dataframe(new_parnme,pst_utils.pst_config["par_fieldnames"],
                                                    pst_utils.pst_config["par_defaults"],
                                                    pst_utils.pst_config["par_dtype"])
        new_par_data.loc[new_parnme,"parnme"] = new_parnme
        self.parameter_data = self.parameter_data.append(new_par_data)
    if in_file is None:
        in_file = template_file.replace(".tpl",'')
    if pst_path is not None:
        # keep only the basename and re-root it under pst_path
        template_file = os.path.join(pst_path,os.path.split(template_file)[-1])
        in_file = os.path.join(pst_path, os.path.split(in_file)[-1])
    self.template_files.append(template_file)
    self.input_files.append(in_file)

    return new_par_data
Parameters
----------
template_file : str
template file
in_file : str(optional)
model input file. If None, template_file.replace('.tpl','') is used
pst_path : str(optional)
the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. Default is None
Returns
-------
new_par_data : pandas.DataFrame
the data for the new parameters that were added. If no new parameters are in the
new template file, returns None
Note
----
populates the new parameter information with default values |
def pb_id(self, pb_id: str):
    """Set the PB Id for this device.

    Also switches the Tango device state to ON.
    """
    # FIXME(BMo) instead of creating the object to check if the PB exists
    #            use a method on PB List?
    # ProcessingBlock(pb_id)
    self.set_state(DevState.ON)
    self._pb_id = pb_id
def to_struct_file(self, f):
    """ write a PEST-style structure file

    Parameters
    ----------
    f : (str or file handle)
        file to write the GeoStruct information to.  When a filename is
        given, the file is opened, written and closed (previously the
        handle leaked).
    """
    if isinstance(f, str):
        # BUG FIX: open via a context manager so the handle is closed
        with open(f, 'w') as handle:
            self.to_struct_file(handle)
        return
    f.write("STRUCTURE {0}\n".format(self.name))
    f.write("  NUGGET {0}\n".format(self.nugget))
    f.write("  NUMVARIOGRAM {0}\n".format(len(self.variograms)))
    for v in self.variograms:
        f.write("  VARIOGRAM {0} {1}\n".format(v.name,v.contribution))
    f.write("  TRANSFORM {0}\n".format(self.transform))
    f.write("END STRUCTURE\n\n")
    # each variogram appends its own STRUCTURE-level block
    for v in self.variograms:
        v.to_struct_file(f)
Parameters
----------
f : (str or file handle)
file to write the GeoStruct information to |
def __read(path):
    '''
    Reads a File with contents in correct JSON format.
    Returns the data as Python objects, or None when the file cannot be
    read or does not contain valid JSON (errors are swallowed on purpose
    to keep the original best-effort contract).

    path - (string) path to the file
    '''
    try:
        # context manager guarantees the file handle is closed
        with open(path, 'r') as data_file:
            return json.loads(data_file.read())
    except (IOError, ValueError):
        # unreadable file or invalid JSON -> best-effort None
        return None
    except Exception:
        # preserve the original blanket swallow for anything else
        return None
Returns the data as Python objects.
path - (string) path to the file |
def _route_action(self, action, email):
    """
    Given an action and an email (can be None), figure out what to do
    with the validated inputs.

    :param str action: The action. Must be one of self.valid_actions.
    :type email: str or None
    :param email: Either an email address, or None if the action doesn't
        need an email address.
    """
    connection = self._get_ses_connection()
    if action == "verify":
        # SES sends a confirmation mail; verification completes out-of-band
        connection.verify_email_address(email)
        print("A verification email has been sent to %s." % email)
    elif action == "delete":
        connection.delete_verified_email_address(email)
        print("You have deleted %s from your SES account." % email)
    elif action == "list":
        # 'list' ignores the email argument entirely
        verified_result = connection.list_verified_email_addresses()
        if len(verified_result.VerifiedEmailAddresses) > 0:
            print("The following emails have been fully verified on your "\
                  "Amazon SES account:")
            for vemail in verified_result.VerifiedEmailAddresses:
                print (" %s" % vemail)
        else:
            print("Your account has no fully verified email addresses yet.")
with the validated inputs.
:param str action: The action. Must be one of self.valid_actions.
:type email: str or None
:param email: Either an email address, or None if the action doesn't
need an email address. |
def _on_connection_open(self, connection):
    """
    Callback invoked when the connection is successfully established.

    Opens a channel on the new connection; channel setup continues in
    ``_on_channel_open``.

    Args:
        connection (pika.connection.SelectConnection): The newly-estabilished
            connection.
    """
    _log.info("Successfully opened connection to %s", connection.params.host)
    self._channel = connection.channel(on_open_callback=self._on_channel_open)
Args:
connection (pika.connection.SelectConnection): The newly-estabilished
connection. |
def _get_data(self, file_id):
    ''' a helper method for retrieving the byte data of a file

    :param file_id: the Drive file id to download
    :return: the raw bytes of the file
    :raises DriveConnectionError: if the Drive media request fails
    '''
    title = '%s._get_data' % self.__class__.__name__
    # request file data
    try:
        record_data = self.drive.get_media(fileId=file_id).execute()
    except Exception as err:
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and discarded the cause
        raise DriveConnectionError(title) from err
    return record_data
def reset_failed(self, pk):
    """
    reset failed counter

    Zeroes both failure counters on every TriggerService where the given
    service id appears as either consumer or provider.

    :param pk: primary key of the service name
    :return: None
    """
    TriggerService.objects.filter(consumer__name__id=pk).update(consumer_failed=0, provider_failed=0)
    TriggerService.objects.filter(provider__name__id=pk).update(consumer_failed=0, provider_failed=0)
:param pk:
:return: |
def propose_unif(self):
    """Propose a new live point by sampling *uniformly* within
    the union of N-spheres defined by our live points.

    Returns the proposed unit-cube position ``u`` and the axes ``ax`` of
    the proposal N-sphere (identity scaled by the RadFriends radius).
    """
    # Initialize a K-D Tree to assist nearest neighbor searches.
    if self.use_kdtree:
        kdtree = spatial.KDTree(self.live_u)
    else:
        kdtree = None

    while True:
        # Sample a point `u` from the union of N-spheres along with the
        # number of overlapping spheres `q` at point `u`.
        u, q = self.radfriends.sample(self.live_u, rstate=self.rstate,
                                      return_q=True, kdtree=kdtree)

        # Check if our sample is within the unit cube.
        if unitcheck(u, self.nonperiodic):
            # Accept the point with probability 1/q to account for
            # overlapping balls (keeps the union sampling uniform).
            if q == 1 or self.rstate.rand() < 1.0 / q:
                break  # if successful, we're done!

    # Define the axes of the N-sphere.
    ax = np.identity(self.npdim) * self.radfriends.radius

    return u, ax
the union of N-spheres defined by our live points. |
def subtype_ids(elements, subtype):
    """Return the indices of all elements that are instances of ``subtype``,
    e.g. show all the nodes that are ``TokenNode``\\s.
    """
    matches = []
    for index, element in enumerate(elements):
        if isinstance(element, subtype):
            matches.append(index)
    return matches
e.g. show all the nodes that are ``TokenNode``\s. |
def on_exception(self, exception):
    """An exception occurred in the streaming thread.

    Logs it with traceback and stores it so the owning thread can
    inspect/re-raise it later.
    """
    logger.error('Exception from stream!', exc_info=True)
    self.streaming_exception = exception
def __constructMetricsModules(self, metricSpecs):
    """
    Creates the required metrics modules

    Parameters:
    -----------------------------------------------------------------------
    metricSpecs:
      A sequence of MetricSpec objects that specify which metric modules to
      instantiate

    Raises ValueError if any spec references an invalid inference element.
    """
    if not metricSpecs:
        return

    self.__metricSpecs = metricSpecs
    for spec in metricSpecs:
        # validate before instantiating so a bad spec fails fast
        if not InferenceElement.validate(spec.inferenceElement):
            raise ValueError("Invalid inference element for metric spec: %r" %spec)

        self.__metrics.append(metrics.getModule(spec))
        self.__metricLabels.append(spec.getLabel())
Parameters:
-----------------------------------------------------------------------
metricSpecs:
A sequence of MetricSpec objects that specify which metric modules to
instantiate |
def feature_path_unset(self):
    """Copy features to memory and remove the association of the feature file.

    Raises IOError when no feature file is currently associated.
    """
    if not self.feature_file:
        raise IOError('No feature file to unset')

    with open(self.feature_path) as handle:
        feats = list(GFF.parse(handle))
        if len(feats) > 1:
            # NOTE(review): with multiple records nothing is copied and the
            # file association is still cleared below -- confirm intended.
            log.warning('Too many sequences in GFF')
        else:
            tmp = feats[0].features

    self.feature_dir = None
    self.feature_file = None
    self.features = tmp
def get_pid(name):
    """Check if a process is running by name.

    Uses the external ``pidof`` utility; returns False when ``pidof`` is
    unavailable or reports no matching process, True otherwise.
    """
    if shutil.which("pidof") is None:
        return False
    try:
        # -s: single shot, capture stdout so nothing is printed
        subprocess.run(["pidof", "-s", name],
                       stdout=subprocess.PIPE, check=True)
    except subprocess.CalledProcessError:
        return False
    return True
def run(self):
    """
    this is the actual execution of the instrument thread: continuously read values from the probes

    Repeatedly advances a noisy first-order process (Euler step of what
    looks like an Ornstein-Uhlenbeck process -- TODO confirm) and
    publishes it via self._output until self._stop is set.
    """
    eta = self.settings['noise_strength']
    gamma = 2 * np.pi * self.settings['noise_bandwidth']
    dt = 1. / self.settings['update frequency']
    control = self.settings['control']
    self._state = self._output
    while self._stop is False:
        # decay factor for one time step
        A = -gamma * dt
        noise = np.sqrt(2*gamma*eta)*np.random.randn()
        self._state *= (1. + A)
        self._state += noise + control
        self._output = self._state
        # sleep for one update period (milliseconds)
        self.msleep(int(1e3 / self.settings['update frequency']))
def _write_plist(self, root):
    """Write plist file based on our generated tree.

    :param root: the root Element of the generated XML tree.
    """
    # prettify the XML
    indent_xml(root)
    tree = ElementTree.ElementTree(root)
    with open(self.preferences_file, "w") as prefs_file:
        # hand-write the plist header since ElementTree cannot emit the
        # Apple DOCTYPE declaration itself
        prefs_file.write(
            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
            "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" "
            "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
            "<plist version=\"1.0\">\n")
        tree.write(prefs_file, xml_declaration=False, encoding="utf-8")
        prefs_file.write("</plist>")
def post_unvote(self, post_id):
    """Action lets you unvote for a post (Requires login).

    Parameters:
        post_id (int): id of the post to remove your vote from.
    """
    # PUT with auth, as required by the unvote endpoint
    return self._get('posts/{0}/unvote.json'.format(post_id),
                     method='PUT', auth=True)
Parameters:
post_id (int): |
def restore_state(self):
    """Restore GUI state from configuration file.

    Re-populates the source/output directory fields and the
    default-output-dir radio from saved settings, falling back to
    ``self.default_directory``.
    """
    # restore last source path
    last_source_path = setting(
        'lastSourceDir', self.default_directory, expected_type=str)
    self.source_directory.setText(last_source_path)

    # restore path pdf output
    last_output_dir = setting(
        'lastOutputDir', self.default_directory, expected_type=str)
    self.output_directory.setText(last_output_dir)

    # restore default output dir combo box
    use_default_output_dir = bool(setting(
        'useDefaultOutputDir', True, expected_type=bool))
    self.scenario_directory_radio.setChecked(
        use_default_output_dir)
def _ReformatMessageString(self, message_string):
    """Reformats the message string.

    Args:
      message_string (str): message string.

    Returns:
      str: message string in Python format() (PEP 3101) style, or None
          when the input is empty.
    """
    def _PlaceHolderSpecifierReplacer(match_object):
        """Replaces message string place holders into Python format() style."""
        expanded_groups = []
        for group in match_object.groups():
            try:
                # Windows placeholders are 1-based; format() is 0-based.
                place_holder_number = int(group, 10) - 1
                expanded_group = '{{{0:d}:s}}'.format(place_holder_number)
            except ValueError:
                expanded_group = group

            expanded_groups.append(expanded_group)

        return ''.join(expanded_groups)

    if not message_string:
        return None

    # order matters: strip whitespace specifiers, escape text specifiers
    # and double curly brackets before rewriting the placeholders
    message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r'', message_string)
    message_string = self._TEXT_SPECIFIER_RE.sub(r'\\\1', message_string)
    message_string = self._CURLY_BRACKETS.sub(r'\1\1', message_string)
    return self._PLACE_HOLDER_SPECIFIER_RE.sub(
        _PlaceHolderSpecifierReplacer, message_string)
Args:
message_string (str): message string.
Returns:
str: message string in Python format() (PEP 3101) style. |
def iter_delimiter(self, byte_size=8192):
    """ Generalization of the default iter file delimited by '\\n'.

    Note:
        The delimiter string can be arbitrarily long; it need not be restricted
        to a single character.  Each yielded piece keeps the delimiter on its
        end; a trailing piece without a delimiter is yielded last.  Setting
        the delimiter to '\\0' is particularly good for use with input created
        by something like "os.popen('find -print0')".

    Args:
        byte_size (integer): Number of bytes to be read at each time.
    """
    leftover = u''
    chunk = self.read(byte_size)
    while chunk:
        leftover += chunk
        # everything before the last split point is complete
        *complete, leftover = leftover.split(self.delimiter)
        for piece in complete:
            yield piece + self.delimiter
        chunk = self.read(byte_size)
    if leftover:
        yield leftover
Note:
The newline string can be arbitrarily long; it need not be restricted to a
single character. You can also set the read size and control whether or not
the newline string is left on the end of the iterated lines. Setting
newline to '\0' is particularly good for use with an input file created with
something like "os.popen('find -print0')".
Args:
byte_size (integer): Number of bytes to be read at each time. |
async def requirements(client: Client, search: str) -> dict:
    """
    GET list of requirements for a given UID/Public key

    :param client: Client to connect to the api
    :param search: UID or public key
    :return: parsed response validated against REQUIREMENTS_SCHEMA
    """
    return await client.get(MODULE + '/requirements/%s' % search, schema=REQUIREMENTS_SCHEMA)
def get_stack_frames(error_stack: bool = True) -> list:
    """
    Returns a list of the current stack frames, which are pruned to focus
    on the Cauldron code where the relevant information resides.
    """
    package_root = environ.paths.package()
    resources_root = environ.paths.resources()

    if error_stack:
        raw_frames = list(traceback.extract_tb(sys.exc_info()[-1]))
    else:
        raw_frames = traceback.extract_stack()
    frames = list(raw_frames)

    def is_internal_frame(frame_filename: str) -> bool:
        """True for frames inside the package but outside its resources."""
        return bool(
            frame_filename
            and frame_filename.startswith(package_root)
            and not frame_filename.startswith(resources_root)
        )

    # Drop leading internal frames, but always keep at least one frame.
    while len(frames) > 1 and is_internal_frame(frames[0].filename):
        frames.pop(0)
    return frames
Cauldron code where the relevant information resides. |
def weave_instance(instance, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
    """
    Low-level weaver for instances: patches the matching bound methods of
    ``instance`` so they run wrapped in ``aspect``.

    .. warning:: You should not use this directly.

    :param instance: Object whose methods are woven.
    :param aspect: An aspect, or a list/tuple of aspects, applied to each
        matched method.
    :param methods: Method-name pattern(s) passed to ``make_method_matcher``.
    :param lazy: Accepted for interface parity with the other weavers;
        appears unused in this function -- confirm against callers.
    :param bag: Tracks already-woven objects so an instance is woven at
        most once.
    :returns: An :obj:`aspectlib.Rollback` object.
    """
    # Already woven: return the no-op rollback sentinel.
    if bag.has(instance):
        return Nothing
    entanglement = Rollback()
    method_matches = make_method_matcher(methods)
    # NOTE(review): the format string says "module=%r" but logs the instance.
    logdebug("weave_instance (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
             instance, aspect, methods, lazy, options)

    def fixup(func):
        # Re-bind the (woven) plain function to the instance so it behaves
        # like a bound method again.
        return func.__get__(instance, type(instance))

    # Ensure the re-binding fixup runs last in the aspect chain.
    fixed_aspect = aspect + [fixup] if isinstance(aspect, (list, tuple)) else [aspect, fixup]
    for attr in dir(instance):
        func = getattr(instance, attr)
        if method_matches(attr):
            if ismethod(func):
                if hasattr(func, '__func__'):
                    realfunc = func.__func__
                else:
                    # Python 2 bound methods expose the function as im_func.
                    realfunc = func.im_func
                entanglement.merge(
                    patch_module(instance, attr, _checked_apply(fixed_aspect, realfunc, module=None), **options)
                )
    return entanglement
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object. |
def parse_config_file(config_file, skip_unknown=False):
    """Parse a Gin config file.

    Args:
      config_file: The path to a Gin config file.
      skip_unknown: A boolean indicating whether unknown configurables and
        imports should be skipped instead of causing errors (alternatively a
        list of configurable names to skip if unknown). See `parse_config`
        for additional details.

    Raises:
      IOError: If `config_file` cannot be read using any registered file
        reader.
    """
    # Try each registered (reader, existence-check) pair in order; the first
    # reader whose check passes handles the file.
    for file_reader, existence_check in _FILE_READERS:
        if not existence_check(config_file):
            continue
        with file_reader(config_file) as stream:
            parse_config(stream, skip_unknown=skip_unknown)
        return
    raise IOError('Unable to open file: {}'.format(config_file))
Args:
config_file: The path to a Gin config file.
skip_unknown: A boolean indicating whether unknown configurables and imports
should be skipped instead of causing errors (alternatively a list of
configurable names to skip if unknown). See `parse_config` for additional
details.
Raises:
IOError: If `config_file` cannot be read using any register file reader. |
def _should_defer(input_layer, args, kwargs):
    """Checks to see if any of the args are templates.

    Args:
      input_layer: The input layer (None when recursing into a container).
      args: Positional arguments to inspect.
      kwargs: Keyword arguments whose values are inspected.

    Returns:
      True if any argument, searched recursively through sequences and
      mappings, is a `_DeferredLayer` or `UnboundVariable`.
    """
    # The bare `collections.Sequence`/`collections.Mapping` aliases were
    # removed in Python 3.10; use `collections.abc` with a Python 2 fallback
    # since this module still targets six-based 2/3 compatibility.
    try:
        abc_sequence = collections.abc.Sequence
        abc_mapping = collections.abc.Mapping
    except AttributeError:  # Python 2
        abc_sequence = collections.Sequence
        abc_mapping = collections.Mapping
    for arg in itertools.chain([input_layer], args, six.itervalues(kwargs)):
        if isinstance(arg, (_DeferredLayer, UnboundVariable)):
            return True
        elif (isinstance(arg, abc_sequence) and
              not isinstance(arg, six.string_types)):
            if _should_defer(None, arg, {}):
                return True
        elif isinstance(arg, abc_mapping):
            if _should_defer(None, (), arg):
                return True
    return False
def correlation_plot(self, data):
    """Create a heatmap of Pearson's correlation coefficients.

    Parameters
    ----------
    data : pd.DataFrame()
        Data to display.

    Returns
    -------
    matplotlib.figure
        Heatmap.
    """
    # CHECK: Add saved filename in result.json
    figure = plt.figure(Plot_Data.count)
    Plot_Data.count += 1
    # Draw the correlation heatmap onto the freshly created figure.
    sns.heatmap(data.corr())
    return figure
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap. |
def _pack_with_custom_ops(dataset, keys, length):
    """Helper-function for packing a dataset which has already been batched.

    See pack_dataset()

    Relies on custom ops which require a custom compiled binary.
    Faster than _pack_with_tf_ops(), and denser packing.

    Args:
      dataset: a dataset containing padded batches of examples.
      keys: a list of strings (must have length 2)
      length: an integer

    Returns:
      a dataset.
    """
    from tensor2tensor.data_generators.ops import pack_sequences_ops  # pylint: disable=g-import-not-at-top
    # faster and better packing but requires custom-built binary.
    key_a, key_b = keys

    def _repack(example):
        """Pack one batch and re-slice it into individual examples."""
        (a_packed, a_segmentation, a_position,
         b_packed, b_segmentation, b_position) = (
             pack_sequences_ops.pack_sequences2(example[key_a], example[key_b],
                                                length))
        combined = {
            key_a: a_packed,
            key_a + "_segmentation": a_segmentation,
            key_a + "_position": a_position,
            key_b: b_packed,
            key_b + "_segmentation": b_segmentation,
            key_b + "_position": b_position,
        }
        return tf.data.Dataset.from_tensor_slices(combined)

    return dataset.flat_map(_repack)
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset. |
def update_from_model_change(self, oldmodel, newmodel, tile):
    """
    Update various internal variables from a model update from oldmodel to
    newmodel for the tile `tile`

    :param oldmodel: model values previously covering `tile`
    :param newmodel: replacement model values for `tile`
    :param tile: tile object providing a `slicer` into the data arrays
    """
    # Swap the old model's contribution to the cached total log-likelihood
    # for the new model's (subtract old, then add new).
    self._loglikelihood -= self._calc_loglikelihood(oldmodel, tile=tile)
    self._loglikelihood += self._calc_loglikelihood(newmodel, tile=tile)
    # Refresh the cached residuals (data - model) only within this tile.
    self._residuals[tile.slicer] = self._data[tile.slicer] - newmodel
newmodel for the tile `tile` |
def generic_visit(self, node: AST, dfltChaining: bool = True) -> str:
    """Default handler, called if no explicit visitor function exists for
    a node.

    Visits every direct child AST node of `node` (including those nested
    inside list-valued fields), in field order.
    """
    for _, field_value in ast.iter_fields(node):
        if isinstance(field_value, AST):
            self.visit(field_value)
        elif isinstance(field_value, list):
            for element in field_value:
                if isinstance(element, AST):
                    self.visit(element)
a node. |
def is_link(url, processed, files):
    """
    Determine whether or not a link should be crawled.

    A url should not be crawled if it
        - Is a file (such urls are added to `files` as a side effect)
        - Has already been crawled

    Args:
        url: str Url to be processed
        processed: list[str] List of urls that have already been crawled
        files: set File urls encountered are added here

    Returns:
        bool If `url` should be crawled
    """
    if url in processed:
        return False
    if url.endswith(BAD_TYPES):
        files.add(url)
        return False
    return True
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled |
def extract_index(index_data, global_index=False):
    '''
    Instantiates and returns an AllIndex object given a valid index
    configuration

    Returns None if the configuration lacks an index name or key fields.

    CLI Example:
        salt myminion boto_dynamodb.extract_index index
    '''
    parsed_data = {}
    keys = []

    # Flatten the nested index configuration into a single options dict.
    for key, value in six.iteritems(index_data):
        for item in value:
            for field, data in six.iteritems(item):
                if field == 'hash_key':
                    parsed_data['hash_key'] = data
                elif field == 'hash_key_data_type':
                    parsed_data['hash_key_data_type'] = data
                elif field == 'range_key':
                    parsed_data['range_key'] = data
                elif field == 'range_key_data_type':
                    parsed_data['range_key_data_type'] = data
                elif field == 'name':
                    parsed_data['name'] = data
                elif field == 'read_capacity_units':
                    parsed_data['read_capacity_units'] = data
                elif field == 'write_capacity_units':
                    parsed_data['write_capacity_units'] = data
                elif field == 'includes':
                    parsed_data['includes'] = data
                elif field == 'keys_only':
                    parsed_data['keys_only'] = True

    # Use .get() for optional settings so a partially-specified index does
    # not crash with KeyError (previously hash_key/name/capacity lookups
    # used [] while the range_key branch already used .get()).
    if parsed_data.get('hash_key'):
        keys.append(
            HashKey(
                parsed_data['hash_key'],
                data_type=parsed_data['hash_key_data_type']
            )
        )
    if parsed_data.get('range_key'):
        keys.append(
            RangeKey(
                parsed_data['range_key'],
                data_type=parsed_data['range_key_data_type']
            )
        )
    if (
            global_index and
            parsed_data.get('read_capacity_units') and
            parsed_data.get('write_capacity_units')):
        parsed_data['throughput'] = {
            'read': parsed_data['read_capacity_units'],
            'write': parsed_data['write_capacity_units']
        }
    if parsed_data.get('name') and keys:
        if global_index:
            if parsed_data.get('keys_only') and parsed_data.get('includes'):
                raise SaltInvocationError('Only one type of GSI projection can be used.')
            if 'throughput' not in parsed_data:
                # Previously this fell through to a bare KeyError; fail with
                # a clear message instead.
                raise SaltInvocationError(
                    'Read and write capacity units are required for a '
                    'global secondary index.'
                )
            if parsed_data.get('includes'):
                return GlobalIncludeIndex(
                    parsed_data['name'],
                    parts=keys,
                    throughput=parsed_data['throughput'],
                    includes=parsed_data['includes']
                )
            elif parsed_data.get('keys_only'):
                return GlobalKeysOnlyIndex(
                    parsed_data['name'],
                    parts=keys,
                    throughput=parsed_data['throughput'],
                )
            else:
                return GlobalAllIndex(
                    parsed_data['name'],
                    parts=keys,
                    throughput=parsed_data['throughput']
                )
        else:
            return AllIndex(
                parsed_data['name'],
                parts=keys
            )
configuration
CLI Example:
salt myminion boto_dynamodb.extract_index index |
def get_record_types(self):
    """Gets the record types available in this object.

    A record ``Type`` explicitly indicates the specification of an
    interface to the record. A record may or may not inherit other
    record interfaces through interface inheritance in which case
    support of a record type may not be explicit in the returned
    list. Interoperability with the typed interface to this object
    should be performed through ``hasRecordType()``.

    return: (osid.type.TypeList) - the record types available
    *compliance: mandatory -- This method must be implemented.*
    """
    from ..type.objects import TypeList
    # Build a Type from the stored data set of each supported record id.
    types = [
        Type(**self._record_type_data_sets[Id(type_idstr).get_identifier()])
        for type_idstr in self._supported_record_type_ids
    ]
    return TypeList(types)
A record ``Type`` explicitly indicates the specification of an
interface to the record. A record may or may not inherit other
record interfaces through interface inheritance in which case
support of a record type may not be explicit in the returned
list. Interoperability with the typed interface to this object
should be performed through ``hasRecordType()``.
return: (osid.type.TypeList) - the record types available
*compliance: mandatory -- This method must be implemented.* |
def translate_abstract_actions_to_keys(self, abstract):
    """
    Translates a list of tuples ([pretty mapping], [value]) to a list of tuples
    ([some key], [translated value]).

    Each single item in `abstract` will undergo the following translation:

    Example1:
        we want: "MoveRight": 5.0
        possible keys for the action are: ("Right", 1.0), ("Left", -1.0)
        result: "Right": 5.0 * 1.0 = 5.0

    Example2:
        we want: "MoveRight": -0.5
        possible keys for the action are: ("Left", -1.0), ("Right", 1.0)
        result: "Left": -0.5 * -1.0 = 0.5 (same as "Right": -0.5)
    """
    # A single (name, value) tuple is normalized into a one-element list.
    if len(abstract) >= 2 and not isinstance(abstract[1], (list, tuple)):
        abstract = [abstract]
    # Translate each entry into either a key action or a scaled axis event.
    key_actions, key_axes = [], []
    for entry in abstract:
        # First mapped key is either a plain key name (action mapping or
        # discretized axis) or a (key-name, scale) tuple (continuous axis).
        mapping = self.action_space_desc[entry[0]]["keys"][0]
        if isinstance(mapping, (bytes, str)):
            key_actions.append((mapping, entry[1]))
        elif isinstance(mapping, tuple):
            key_axes.append((mapping[0], entry[1] * mapping[1]))
        else:
            raise TensorForceError("action_space_desc contains unsupported type for key {}!".format(entry[0]))
    return key_actions, key_axes
each single item in abstract will undergo the following translation:
Example1:
we want: "MoveRight": 5.0
possible keys for the action are: ("Right", 1.0), ("Left", -1.0)
result: "Right": 5.0 * 1.0 = 5.0
Example2:
we want: "MoveRight": -0.5
possible keys for the action are: ("Left", -1.0), ("Right", 1.0)
result: "Left": -0.5 * -1.0 = 0.5 (same as "Right": -0.5) |
def prior_to_xarray(self):
    """Convert prior samples to xarray."""
    # Normalize prior_predictive into a list of variable names to skip.
    predictive_vars = self.prior_predictive
    if predictive_vars is None:
        predictive_vars = []
    elif isinstance(predictive_vars, str):
        predictive_vars = [predictive_vars]
    # Exclude predictive variables and the sampler's "lp__" term.
    draws = get_draws(self.prior, ignore=predictive_vars + ["lp__"])
    return dict_to_dataset(
        draws, library=self.pystan, coords=self.coords, dims=self.dims
    )
def get_model_field_label_and_value(instance, field_name) -> (str, str):
    """
    Returns model field label and value.

    :param instance: Model instance
    :param field_name: Model attribute name
    :return: (label, value) tuple
    """
    label = field_name
    value = str(getattr(instance, field_name))
    # Find the matching model field (if any) to refine label and value.
    field = next(
        (f for f in instance._meta.fields if f.attname == field_name), None)
    if field is not None:
        label = field.verbose_name
        # Map a raw choice value to its human-readable label.
        if hasattr(field, 'choices') and len(field.choices) > 0:
            value = choices_label(field.choices, value)
    return label, force_text(value)
:param instance: Model instance
:param field_name: Model attribute name
:return: (label, value) tuple |
def export(self, name, columns, points):
    """Write the points to the Cassandra cluster.

    :param name: plugin name, used as the partition key
    :param columns: stat names
    :param points: stat values, in the same order as `columns`
    """
    logger.debug("Export {} stats to Cassandra".format(name))

    # Remove non number stats and convert all to float (for Boolean).
    # Use items() instead of iteritems(): plain dicts have no iteritems()
    # on Python 3, so the previous code raised AttributeError there.
    data = {k: float(v) for (k, v) in dict(zip(columns, points)).items() if isinstance(v, Number)}

    # Write input to the Cassandra table
    try:
        stmt = "INSERT INTO {} (plugin, time, stat) VALUES (?, ?, ?)".format(self.table)
        query = self.session.prepare(stmt)
        self.session.execute(
            query,
            (name, uuid_from_time(datetime.now()), data)
        )
    except Exception as e:
        logger.error("Cannot export {} stats to Cassandra ({})".format(name, e))
def create_notebook(self, position=Gtk.PositionType.TOP):
    """
    Create and return a Gtk.Notebook with a visible border and its tabs
    placed at `position`.
    """
    nb = Gtk.Notebook()
    nb.set_show_border(True)
    nb.set_tab_pos(position)
    return nb
def loop(self, sequences=None, outputs=None, non_sequences=None, block=None, **kwargs):
    """
    Start a loop.

    Usage:
    ```
    with deepy.graph.loop(sequences={"x": x}, outputs={"o": None}) as vars:
        vars.o = vars.x + 1
    loop_outputs = deepy.graph.loop_outputs()
    result = loop_outputs.o
    ```

    :param sequences: dict of named sequence inputs (see usage above).
    :param outputs: dict of named loop outputs; None marks an output to be
        defined inside the loop body.
    :param non_sequences: additional non-sequence inputs.
    :param block: block to attach the loop to.
    :return: a Loop context manager.
    """
    # Imported lazily -- presumably to avoid a circular import at module
    # load time; confirm against the `loop` module.
    from loop import Loop
    return Loop(sequences, outputs, non_sequences, block, **kwargs)
Usage:
```
with deepy.graph.loop(sequences={"x": x}, outputs={"o": None}) as vars:
vars.o = vars.x + 1
loop_outputs = deepy.graph.loop_outputs()
result = loop_outputs.o
``` |
def groups(self):
    """Get the list of Groups (by dn) that the bound CSH LDAP member object
    is in.
    """
    # Keep only the memberOf DNs that live under the groups OU.
    return [
        group_dn
        for group_dn in self.get('memberof')
        if self.__ldap_group_ou__ in group_dn
    ]
is in. |
def match(self, table, nomatch=0):
    """
    Make a vector of the positions of (first) matches of its first argument
    in its second.

    Only applicable to single-column categorical/string frames.

    :param List table: the list of items to match against
    :param int nomatch: value that should be returned when there is no match.

    :returns: a new H2OFrame containing for each cell from the source frame
        the index where the pattern ``table`` first occurs within that cell.
    """
    match_expr = ExprNode("match", self, table, nomatch, None)
    return H2OFrame._expr(expr=match_expr)
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell. |
def min(self, array, role = None):
    """
    Return the minimum value of ``array`` for the entity members.

    ``array`` must have the dimension of the number of persons in the simulation

    If ``role`` is provided, only the entity member with the given role are taken into account.

    Example:
    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.min(salaries)
    >>> array([0])
    >>> household.min(salaries, role = Household.PARENT)  # Assuming the 1st two persons are parents
    >>> array([1500])
    """
    # np.inf: the np.infty alias was removed in NumPy 2.0, so use the
    # canonical name (identical value, works on all NumPy versions).
    return self.reduce(array, reducer = np.minimum, neutral_element = np.inf, role = role)
return self.reduce(array, reducer = np.minimum, neutral_element = np.infty, role = role) | Return the minimum value of ``array`` for the entity members.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.min(salaries)
>>> array([0])
>>> household.min(salaries, role = Household.PARENT) # Assuming the 1st two persons are parents
>>> array([1500]) |
def run_filter(vrn_file, align_bam, ref_file, data, items):
    """Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.

    Moves damage estimation to INFO field, instead of leaving in FILTER.

    :param vrn_file: input VCF of somatic calls
    :param align_bam: alignment BAM backing the calls
    :param ref_file: reference FASTA
    :param data: sample data dictionary; updated and returned
    :param items: list of sample data dictionaries for the batch
    :return: `data` with `vrn_file` pointing at the filtered VCF and
        `damage_plots` listing any produced QC plots
    """
    if not should_filter(items) or not vcfutils.vcf_has_variants(vrn_file):
        return data
    else:
        raw_file = "%s-damage.vcf" % utils.splitext_plus(vrn_file)[0]
        out_plot_files = ["%s%s" % (utils.splitext_plus(raw_file)[0], ext)
                          for ext in ["_seq_bias_simplified.pdf", "_pcr_bias_simplified.pdf"]]
        # Skip re-running when the filtered output (plain or bgzipped) is
        # already newer than the input VCF.
        if not utils.file_uptodate(raw_file, vrn_file) and not utils.file_uptodate(raw_file + ".gz", vrn_file):
            with file_transaction(items[0], raw_file) as tx_out_file:
                # Does not apply --qcSummary plotting due to slow runtimes
                cmd = ["dkfzbiasfilter.py", "--filterCycles", "1", "--passOnly",
                       "--tempFolder", os.path.dirname(tx_out_file),
                       vrn_file, align_bam, ref_file, tx_out_file]
                do.run(cmd, "Filter low frequency variants for DNA damage and strand bias")
                # Move plots out of the transactional temp directory so they
                # survive after the transaction finishes.
                for out_plot in out_plot_files:
                    tx_plot_file = os.path.join("%s_qcSummary" % utils.splitext_plus(tx_out_file)[0], "plots",
                                                os.path.basename(out_plot))
                    if utils.file_exists(tx_plot_file):
                        shutil.move(tx_plot_file, out_plot)
        raw_file = vcfutils.bgzip_and_index(raw_file, items[0]["config"])
        data["vrn_file"] = _filter_to_info(raw_file, items[0])
        # Report only the plots that were actually generated.
        out_plot_files = [x for x in out_plot_files if utils.file_exists(x)]
        data["damage_plots"] = out_plot_files
        return data
Moves damage estimation to INFO field, instead of leaving in FILTER. |
def load(path, group=None, sel=None, unpack=False):
    """
    Loads an HDF5 saved with `save`.

    This function requires the `PyTables <http://www.pytables.org/>`_ module to
    be installed.

    Parameters
    ----------
    path : string
        Filename from which to load the data.
    group : string or list
        Load a specific group in the HDF5 hierarchy. If `group` is a list of
        strings, then a tuple will be returned with all the groups that were
        specified.
    sel : slice or tuple of slices
        If you specify `group` and the target is a numpy array, then you can
        use this to slice it. This is useful for opening subsets of large HDF5
        files. To compose the selection, you can use `deepdish.aslice`.
    unpack : bool
        If True, a single-entry dictionaries will be unpacked and the value
        will be returned directly. That is, if you save ``dict(a=100)``, only
        ``100`` will be loaded.

    Returns
    -------
    data : anything
        Hopefully an identical reconstruction of the data that was saved.

    See also
    --------
    save
    """
    with tables.open_file(path, mode='r') as h5file:
        pathtable = {}  # dict to keep track of objects already loaded
        if group is not None:
            # Load only the requested group(s) rather than the whole file.
            if isinstance(group, str):
                data = _load_specific_level(h5file, h5file, group, sel=sel,
                                            pathtable=pathtable)
            else:  # Assume group is a list or tuple
                data = []
                for g in group:
                    data_i = _load_specific_level(h5file, h5file, g, sel=sel,
                                                  pathtable=pathtable)
                    data.append(data_i)
                data = tuple(data)
        else:
            grp = h5file.root
            # Files saved with save(..., and a single unnamed value) mark
            # themselves for automatic unpacking via a root attribute.
            auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
                           grp._v_attrs[DEEPDISH_IO_UNPACK])
            do_unpack = unpack or auto_unpack
            if do_unpack and len(grp._v_children) == 1:
                # Single child: load it directly instead of a 1-entry dict.
                name = next(iter(grp._v_children))
                data = _load_specific_level(h5file, grp, name, sel=sel,
                                            pathtable=pathtable)
                do_unpack = False
            elif sel is not None:
                raise ValueError("Must specify group with `sel` unless it "
                                 "automatically unpacks")
            else:
                data = _load_level(h5file, grp, pathtable)
            # Version check: warn when the file was written by a newer
            # deepdish than this one.
            if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
                v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
            else:
                v = 0
            if v > IO_VERSION:
                warnings.warn('This file was saved with a newer version of '
                              'deepdish. Please upgrade to make sure it loads '
                              'correctly.')
            # Attributes can't be unpacked with the method above, so fall back
            # to this
            if do_unpack and isinstance(data, dict) and len(data) == 1:
                data = next(iter(data.values()))
        return data
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save |
def run(self):
    """
    Ping entries to a directory in a thread.
    """
    directory_logger = getLogger('zinnia.ping.directory')
    # Apply the configured timeout globally while pinging.
    socket.setdefaulttimeout(self.timeout)
    for item in self.entries:
        response = self.ping_entry(item)
        self.results.append(response)
        directory_logger.info('%s : %s', self.server_name, response['message'])
    # Restore the default (no timeout) once all entries are pinged.
    socket.setdefaulttimeout(None)
def get_summary(profile_block_list, maxlines=20):
    """
    Build a time-sorted, clipped summary of profiled blocks.

    References:
        https://github.com/rkern/line_profiler
    """
    total_times = [get_block_totaltime(block) for block in profile_block_list]
    # Blocks without timing info get a sentinel of -1 so they sort first.
    total_times = [-1 if t is None else t for t in total_times]
    block_ids = [get_block_id(block) for block in profile_block_list]
    order = ut.list_argsort(total_times)
    sorted_times = ut.take(total_times, order)
    sorted_ids = ut.take(block_ids, order)
    aligned_ids = ut.util_str.align_lines(sorted_ids, ':')
    summary_lines = [
        '%6.2f seconds - %s' % (t, line)
        for t, line in zip(sorted_times, aligned_ids)
    ]
    # Keep only the trailing `maxlines` entries.
    clipped = ut.listclip(summary_lines, maxlines, fromback=True)
    return '\n'.join(clipped)
https://github.com/rkern/line_profiler |
def get_header(self, idx, formatted=False):
    """
    Return a list of the variable names at the given indices.

    When `formatted` is True the formatted names are used, otherwise the
    unformatted ones.
    """
    names = self._fname if formatted else self._uname
    return [names[i] for i in idx]
def SendKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
    """
    Simulate typing a key.

    key: int, a value in class `Keys`.
    waitTime: float, seconds to sleep after the key-up event.
    """
    extended = KeyboardEventFlag.ExtendedKey
    # Press, then release, with the extended-key flag set for both events.
    keybd_event(key, 0, KeyboardEventFlag.KeyDown | extended, 0)
    keybd_event(key, 0, KeyboardEventFlag.KeyUp | extended, 0)
    time.sleep(waitTime)
key: int, a value in class `Keys`. |
def _set_port(self, v, load=False):
    """
    Setter method for port, mapped from YANG variable /qos/cpu/slot/port_group/port (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port() directly.

    NOTE(review): this looks like pyangbind-generated code -- prefer
    regenerating from the YANG model over editing it by hand.
    """
    # Unwrap values that carry their own dynamic YANG type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce `v` into the YANG container type for `port`.
        t = YANGDynClass(v,base=port.port, is_container='container', presence=False, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS port parameters', u'alt-name': u'port', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """port must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=port.port, is_container='container', presence=False, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS port parameters', u'alt-name': u'port', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='container', is_config=True)""",
        })
    self.__port = t
    # Notify any registered change hook after assignment.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port() directly. |
def open(self, session, resource_name,
         access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE):
    """Opens a session to the specified resource.

    Corresponds to viOpen function of the VISA library.

    :param session: Resource Manager session
                    (should always be a session returned
                    from open_default_resource_manager()).
    :param resource_name: Unique symbolic name of a resource.
    :param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes)
    :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
                         before returning an error.
    :return: Unique logical identifier reference to a session, return value of the library call.
    :rtype: session, :class:`pyvisa.constants.StatusCode`
    """
    try:
        open_timeout = int(open_timeout)
    except ValueError:
        raise ValueError('open_timeout (%r) must be an integer (or compatible type)' % open_timeout)

    try:
        parsed = rname.parse_resource_name(resource_name)
    except rname.InvalidResourceName:
        return 0, constants.StatusCode.error_invalid_resource_name

    # Pick the session class matching the parsed interface/resource class
    # and instantiate it for this resource.
    session_cls = sessions.Session.get_session_class(
        parsed.interface_type_const, parsed.resource_class)
    new_session = session_cls(session, resource_name, parsed)
    try:
        new_session.device = self.devices[new_session.attrs[constants.VI_ATTR_RSRC_NAME]]
    except KeyError:
        return 0, constants.StatusCode.error_resource_not_found
    return self._register(new_session), constants.StatusCode.success
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session
(should always be a session returned
from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes)
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode` |
def QA_indicator_ASI(DataFrame, M1=26, M2=10):
    """
    Accumulation Swing Index -- direct transcription of the TDX formula:

    LC=REF(CLOSE,1);
    AA=ABS(HIGH-LC);
    BB=ABS(LOW-LC);
    CC=ABS(HIGH-REF(LOW,1));
    DD=ABS(LC-REF(OPEN,1));
    R=IF(AA>BB AND AA>CC,AA+BB/2+DD/4,IF(BB>CC AND BB>AA,BB+AA/2+DD/4,CC+DD/4));
    X=(CLOSE-LC+(CLOSE-OPEN)/2+LC-REF(OPEN,1));
    SI=16*X/R*MAX(AA,BB);
    ASI:SUM(SI,M1);
    ASIT:MA(ASI,M2);
    """
    CLOSE = DataFrame['close']
    HIGH = DataFrame['high']
    LOW = DataFrame['low']
    OPEN = DataFrame['open']
    LC = REF(CLOSE, 1)  # previous close
    AA = ABS(HIGH - LC)
    BB = ABS(LOW-LC)
    CC = ABS(HIGH - REF(LOW, 1))
    DD = ABS(LC - REF(OPEN, 1))
    # R mirrors the nested IF in the formula above (IFAND == IF with AND).
    R = IFAND(AA > BB, AA > CC, AA+BB/2+DD/4,
              IFAND(BB > CC, BB > AA, BB+AA/2+DD/4, CC+DD/4))
    X = (CLOSE - LC + (CLOSE - OPEN) / 2 + LC - REF(OPEN, 1))
    SI = 16*X/R*MAX(AA, BB)
    ASI = SUM(SI, M1)   # swing index summed over M1 periods
    ASIT = MA(ASI, M2)  # moving average of ASI over M2 periods
    return pd.DataFrame({
        'ASI': ASI, 'ASIT': ASIT
    })
AA=ABS(HIGH-LC);
BB=ABS(LOW-LC);
CC=ABS(HIGH-REF(LOW,1));
DD=ABS(LC-REF(OPEN,1));
R=IF(AA>BB AND AA>CC,AA+BB/2+DD/4,IF(BB>CC AND BB>AA,BB+AA/2+DD/4,CC+DD/4));
X=(CLOSE-LC+(CLOSE-OPEN)/2+LC-REF(OPEN,1));
SI=16*X/R*MAX(AA,BB);
ASI:SUM(SI,M1);
ASIT:MA(ASI,M2); |
def create_transform_job(self, config, wait_for_completion=True,
                         check_interval=30, max_ingestion_time=None):
    """
    Create a transform job

    :param config: the config for transform job
    :type config: dict
    :param wait_for_completion: if the program should keep running until job finishes
    :type wait_for_completion: bool
    :param check_interval: the time interval in seconds which the operator
        will check the status of any SageMaker job
    :type check_interval: int
    :param max_ingestion_time: the maximum ingestion time in seconds. Any
        SageMaker jobs that run longer than this will fail. Setting this to
        None implies no timeout for any SageMaker job.
    :type max_ingestion_time: int
    :return: A response to transform job creation
    """
    # Validate the input location before submitting the job.
    s3_input_uri = config['TransformInput']['DataSource']['S3DataSource']['S3Uri']
    self.check_s3_url(s3_input_uri)

    response = self.get_conn().create_transform_job(**config)
    if wait_for_completion:
        # Poll the job status until it reaches a terminal state.
        self.check_status(config['TransformJobName'], 'TransformJobStatus',
                          self.describe_transform_job, check_interval,
                          max_ingestion_time)
    return response
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation |
def condition_on_par_knowledge(cov, par_knowledge_dict):
    """Experimental function to include conditional prior information
    for one or more parameters in a full covariance matrix.

    :param cov: covariance matrix with named rows/columns
    :param par_knowledge_dict: maps parameter name -> conditional variance
    :return: a new covariance matrix conditioned on the supplied knowledge
    :raises Exception: if any parameter name is not found in `cov`
    """
    missing = [parnme for parnme in par_knowledge_dict.keys()
               if parnme not in cov.row_names]
    if missing:
        raise Exception("par knowledge dict parameters not found: {0}".
                        format(','.join(missing)))

    # Build the selection matrix and sigma epsilon.
    sel = cov.zero2d
    sigma_ep = cov.zero2d
    for parnme, var in par_knowledge_dict.items():
        idx = cov.row_names.index(parnme)
        sel.x[idx, idx] = 1.0
        # NOTE(review): sigma_ep is assembled but never used below -- the
        # original code had a commented-out `term2 += sigma_ep`; confirm
        # whether the measurement-noise term should be included.
        sigma_ep.x[idx, idx] = var

    # Conditioning update: subtract inv(sel*cov*sel') * sel * cov from cov.
    # (Debug print() calls that wrote sel/term2 to stdout were removed.)
    term2 = sel * cov * sel.T
    term2 = term2.inv
    term2 *= sel
    term2 *= cov
    new_cov = cov - term2
    return new_cov
for one or more parameters in a full covariance matrix |
def infer_isinstance(callnode, context=None):
    """Infer isinstance calls
    :param nodes.Call callnode: an isinstance call
    :param InferenceContext: context for call
    (currently unused but is a common interface for inference)
    :rtype nodes.Const: Boolean Const value of isinstance call
    :raises UseInferenceDefault: If the node cannot be inferred
    """
    call = arguments.CallSite.from_call(callnode)
    if call.keyword_arguments:
        # isinstance doesn't support keyword arguments
        raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments")
    if len(call.positional_arguments) != 2:
        # isinstance(obj, class_or_tuple) takes exactly two positional args
        raise UseInferenceDefault(
            "Expected two arguments, got {count}".format(
                count=len(call.positional_arguments)
            )
        )
    # The left hand argument is the obj to be checked
    obj_node, class_or_tuple_node = call.positional_arguments
    # The right hand argument is the class(es) that the given
    # obj is to be checked against
    try:
        # presumably normalizes a single class or a tuple of classes into
        # one flat container -- defined elsewhere in this module
        class_container = _class_or_tuple_to_container(
            class_or_tuple_node, context=context
        )
    except InferenceError:
        # could not resolve the classes; fall back to default inference
        raise UseInferenceDefault
    try:
        isinstance_bool = helpers.object_isinstance(obj_node, class_container, context)
    except AstroidTypeError as exc:
        # e.g. the second argument was not a class or tuple of classes
        raise UseInferenceDefault("TypeError: " + str(exc))
    except MroError as exc:
        # inconsistent / unresolvable MRO on one of the candidate classes
        raise UseInferenceDefault from exc
    if isinstance_bool is util.Uninferable:
        # the check itself could not be decided statically
        raise UseInferenceDefault
return nodes.Const(isinstance_bool) | Infer isinstance calls
:param nodes.Call callnode: an isinstance call
:param InferenceContext: context for call
(currently unused but is a common interface for inference)
:rtype nodes.Const: Boolean Const value of isinstance call
:raises UseInferenceDefault: If the node cannot be inferred |
def format_prefix(filename, sres):
    """
    Prefix to a filename in the directory listing, mimicking the
    left-hand columns of an "ls -alh" listing (mode, links, owner,
    group, size, mtime).
    """
    # Resolve the numeric uid/gid to names, falling back to the raw
    # numeric ids when no passwd/group entry exists.
    try:
        user = pwd.getpwuid(sres.st_uid).pw_name
    except KeyError:
        user = sres.st_uid
    try:
        group = grp.getgrgid(sres.st_gid).gr_name
    except KeyError:
        group = sres.st_gid
return '%s %3d %10s %10s %10d %s' % (
format_mode(sres),
sres.st_nlink,
user,
group,
sres.st_size,
format_mtime(sres.st_mtime),
) | Prefix to a filename in the directory listing. This is to make the
listing similar to an output of "ls -alh". |
def _prev_month(self):
"""Updated calendar to show the previous month."""
self._canvas.place_forget()
self._date = self._date - self.timedelta(days=1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() | Updated calendar to show the previous month. |
def get_oc_api_token():
    """
    Get token of user logged in OpenShift cluster

    :return: str, API token
    :raises ConuException: when ``oc whoami -t`` fails
    """
    # Ensure the `oc` client binary is available before shelling out
    # (presumably raises when it is missing -- defined elsewhere).
    oc_command_exists()
    try:
        return run_cmd(["oc", "whoami", "-t"], return_output=True).rstrip() # remove '\n'
    except subprocess.CalledProcessError as ex:
raise ConuException("oc whoami -t failed: %s" % ex) | Get token of user logged in OpenShift cluster
:return: str, API token |
def _updateB(oldB, B, W, degrees, damping, inds, backinds): # pragma: no cover
    '''belief update function.

    Writes damped belief updates into ``B`` in place, reading previous
    beliefs from ``oldB`` and weights from ``W``.
    NOTE(review): looks like a max-sum / b-matching style message update
    where each node subtracts its best competing belief -- confirm
    against the calling code.
    '''
    for j,d in enumerate(degrees):
        kk = inds[j]
        bk = backinds[j]
        if d == 0:
            # degree-0 node: no assignment possible, force belief to -inf
            B[kk,bk] = -np.inf
            continue
        belief = W[kk,bk] + W[j]
        oldBj = oldB[j]
        if d == oldBj.shape[0]:
            # degree equals the row length: there is no (d+1)-th largest
            # entry, so bplus falls back to the last element.
            # (quickselect presumably returns the index of the d-th
            # largest value -- TODO confirm its contract.)
            bth = quickselect(-oldBj, d-1)
            bplus = -1
        else:
            bth,bplus = quickselect(-oldBj, d-1, d)
        # Subtract the best competing belief: entries at or above the
        # d-th largest compete against the (d+1)-th, others against the
        # d-th.
        belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth])
B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk] | belief update function. |
def Record(self, obj):
    """Records the object as visited.

    Args:
      obj: visited object.

    Returns:
      True if the object hasn't been previously visited or False if it has
      already been recorded or the quota has been exhausted.
    """
    recorder = self._visit_recorder_objects
    obj_id = id(obj)
    # Refuse when the visit quota is exhausted or the object was already
    # seen (short-circuit keeps the quota check first, as before).
    if len(recorder) >= _MAX_VISIT_OBJECTS or obj_id in recorder:
        return False
    recorder[obj_id] = obj
return True | Records the object as visited.
Args:
obj: visited object.
Returns:
True if the object hasn't been previously visited or False if it has
already been recorded or the quota has been exhausted. |
def copy_bulma_files(self):
    """
    Copies Bulma static files from package's static/bulma into project's
    STATIC_ROOT/bulma
    """
    # Three directory levels above this module is the package root,
    # which holds the bundled static/bulma directory.
    package_root = os.path.dirname(
        os.path.dirname(os.path.dirname(__file__))
    )
    original_bulma_dir = os.path.join(package_root, 'static', 'bulma')
shutil.copytree(original_bulma_dir, self.static_root_bulma_dir) | Copies Bulma static files from package's static/bulma into project's
STATIC_ROOT/bulma |
def ui_extensions(self):
    """
    Provides access to UI extensions management methods.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/ui-extensions

    :return: :class:`EnvironmentUIExtensionsProxy <contentful_management.ui_extensions_proxy.EnvironmentUIExtensionsProxy>` object.
    :rtype: contentful.ui_extensions_proxy.EnvironmentUIExtensionsProxy

    Usage:

    >>> ui_extensions_proxy = environment.ui_extensions()
    <EnvironmentUIExtensionsProxy space_id="cfexampleapi" environment_id="master">
    """
return EnvironmentUIExtensionsProxy(self._client, self.space.id, self.id) | Provides access to UI extensions management methods.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/ui-extensions
:return: :class:`EnvironmentUIExtensionsProxy <contentful_management.ui_extensions_proxy.EnvironmentUIExtensionsProxy>` object.
:rtype: contentful.ui_extensions_proxy.EnvironmentUIExtensionsProxy
Usage:
>>> ui_extensions_proxy = environment.ui_extensions()
<EnvironmentUIExtensionsProxy space_id="cfexampleapi" environment_id="master"> |
def _get_pathcost_func(
    name: str
) -> Callable[[int, int, int, int, Any], float]:
    """Return a properly cast PathCostArray callback.

    ``name`` is the name of an exported symbol in the compiled ``lib``;
    its address is cast to the ``TCOD_path_func_t`` C callback type so
    it can be handed to the C pathfinding API.
    """
    return ffi.cast(  # type: ignore
        "TCOD_path_func_t", ffi.addressof(lib, name)
) | Return a properly cast PathCostArray callback. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.