code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def remove_user(self, name):
    """Remove user `name` from this :class:`Database`.
    User `name` will no longer have permissions to access this
    :class:`Database`.
    :Parameters:
      - `name`: the name of the user to remove
    """
    cmd = SON([("dropUser", name)])
    # Don't send {} as writeConcern.
    write_concern = self.write_concern
    if write_concern.acknowledged and write_concern.document:
        cmd["writeConcern"] = write_concern.document
    try:
        self.command(cmd)
    except OperationFailure as exc:
        # See comment in add_user try / except above: legacy servers
        # without the dropUser command fall back to direct manipulation
        # of the system.users collection.
        if exc.code not in common.COMMAND_NOT_FOUND_CODES:
            raise
        users = self._collection_default_options('system.users')
        users.delete_one({"user": name})
User `name` will no longer have permissions to access this
:class:`Database`.
:Parameters:
- `name`: the name of the user to remove |
def is_inexact(arg):
    '''
    is_inexact(x) yields True if x is a number represented by floating-point data (i.e., either a
    non-integer real number or a complex number) and False otherwise.
    '''
    # For quantities, strip the unit and test the bare magnitude recursively.
    if is_quantity(arg):
        return is_inexact(mag(arg))
    # BUG FIX: the original called is_npscalar(u, np.inexact) where `u` is an
    # undefined name, raising NameError whenever a non-quantity reached this
    # path; the argument under test is `arg`.
    return is_npscalar(arg, np.inexact) or is_npvalue(arg, np.inexact)
non-integer real number or a complex number) and False otherwise. |
def set_defaults(self, default_values, recursive=False):
    """
    Merge this Parameters object on top of the given defaults.

    :param default_values: Parameters with default parameter values.
    :param recursive: (optional) True to perform deep copy, False for shallow copy. Default: False.
    :return: a new Parameters object combining defaults and own values.
    """
    merged = Parameters()
    if recursive:
        # Deep copy: write defaults first, then overwrite with our own values.
        RecursiveObjectWriter.copy_properties(merged, default_values)
        RecursiveObjectWriter.copy_properties(merged, self)
    else:
        # Shallow copy with the same precedence (own values win).
        ObjectWriter.set_properties(merged, default_values)
        ObjectWriter.set_properties(merged, self)
    return merged
:param default_values: Parameters with default parameter values.
:param recursive: (optional) true to perform deep copy, and false for shallow copy. Default: false
:return: a new Parameters object. |
def get_stdev(self, asset_type):
    """
    Return the standard deviation of load times for one asset type.

    :param asset_type: ``str`` of the asset type to calculate standard
        deviation for ('ttfb', 'page', or a member of self.asset_types).
    :returns: An ``int`` or ``float`` of standard deviation, rounded to
        self.decimal_precision.
    :raises ValueError: if asset_type is not recognized.
    """
    if asset_type == 'ttfb':
        # Edge case: TTFB lives on the page objects, not in asset timings.
        load_times = [page.time_to_first_byte for page in self.pages
                      if page.time_to_first_byte is not None]
    elif asset_type != 'page' and asset_type not in self.asset_types:
        raise ValueError('asset_type must be one of:\nttfb\n{0}'.format(
            '\n'.join(self.asset_types)))
    else:
        load_times = self.get_load_times(asset_type)
    if not load_times or not sum(load_times):
        return 0
    return round(stdev(load_times), self.decimal_precision)
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: A ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision |
def selfconsistency(self, u_int, J_coup, mean_field_prev=None):
    """Iterate the Hamiltonian until a stable self-consistent mean field is found.

    :param u_int: Coulomb interaction strength fed to the inter-spin Hamiltonian.
    :param J_coup: coupling constant fed to the inter-spin Hamiltonian.
    :param mean_field_prev: optional initial mean field; defaults to
        ``[ekin, ekin]`` taken from ``self.param``.
    :return: list of successive mean-field arrays (``hlog``); the last entry is
        the converged (or last attempted) mean field.
    """
    if mean_field_prev is None:
        mean_field_prev = np.array([self.param['ekin']]*2)
    hlog = [mean_field_prev]
    self.oper['Hint'] = self.inter_spin_hamiltonian(u_int, J_coup)
    converging = True
    # At half filling the Lagrange multipliers stay fixed; no root solve needed.
    half_fill = (self.param['populations'] == 0.5).all()
    while converging:
        if half_fill:
            self.update_H(hlog[-1], self.param['lambda'])
        else:
            # Away from half filling, solve for the multipliers that enforce
            # the population restriction at the latest mean field.
            res = root(self.restriction, self.param['lambda'], (hlog[-1]))#, method='lm')
            if not res.success:
                # Root solve failed: damp with a 50/50 mix of old and new values.
                res.x = res.x * 0.5 + 0.5*self.param['lambda']
                self.update_H(self.mean_field()*0.5 + 0.5*hlog[-1], res.x)
                print('fail', self.param['populations'][3:5])
            if (self.quasiparticle_weight() < 0.001).all():
                # Vanishing quasiparticle weight: stop and return the history.
                return hlog
            self.param['lambda'] = res.x
        hlog.append(self.mean_field())
        # Keep iterating while the mean field AND the restriction are both
        # still above tolerance (note .all(): every component must exceed tol).
        converging = (abs(hlog[-1] - hlog[-2]) > self.param['tol']).all() \
            or (abs(self.restriction(self.param['lambda'], hlog[-1])) > self.param['tol']).all()
    return hlog
def column_rename(self, existing_name, hsh=None):
    """
    Build a column name unique to this feature's columns.

    Like unique_name, but prepends this feature's readable name to the
    existing column name and replaces the unique hash at the end.
    """
    try:
        existing_name = str(existing_name)
    except UnicodeEncodeError:
        # Keep the original (unicode) name if it cannot be coerced.
        pass
    if hsh is None:
        hsh = self._hash()
    base = self._remove_hashes(existing_name)
    if self._name:
        return '%s(%s) [%s]' % (self._name, base, hsh)
    return '%s [%s]' % (base, hsh)
feature. accomplishes this by prepending readable string to existing
column name and replacing unique hash at end of column name. |
def _subthread_handle_accepted(self, client):
"""Gets accepted clients from the queue object and sets up the client socket.
The client can then be found in the clients dictionary with the socket object
as the key.
"""
conn, addr = client
if self.handle_incoming(conn, addr):
logging.info('Accepted connection from client: {}'.format(addr))
conn.setblocking(False)
self.clients[conn] = addr
self.register(conn)
else:
logging.info('Refused connection from client: {}'.format(addr))
self.disconnect(conn) | Gets accepted clients from the queue object and sets up the client socket.
The client can then be found in the clients dictionary with the socket object
as the key. |
def cublasStrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, C, ldc):
    """
    Matrix-matrix product for real triangular matrix (single precision).

    Thin ctypes wrapper around `cublasStrmm_v2`.

    Parameters
    ----------
    handle : cuBLAS context handle.
    side, uplo, trans, diag : keys mapped through the module-level
        _CUBLAS_SIDE_MODE / _CUBLAS_FILL_MODE / _CUBLAS_OP / _CUBLAS_DIAG
        dictionaries to the corresponding cuBLAS enum values.
    m, n : int
        Matrix dimensions.
    alpha : float
        Scalar multiplier; passed by reference as a C float.
    A, B, C : device pointers (coerced with int()) to the matrices.
    lda, ldb, ldc : int
        Leading dimensions of A, B and C.

    Raises on failure via `cublasCheckStatus`.
    """
    status = _libcublas.cublasStrmm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n, ctypes.byref(ctypes.c_float(alpha)),
                                       int(A), lda, int(B), ldb, int(C), ldc)
    cublasCheckStatus(status)
def unlink_chunk(self, x, z):
    """
    Remove a chunk from the header of the region file.
    Fragmentation is not a problem, chunks are written to free sectors when possible.

    :param x: chunk x index within the region (header slot is x + 32*z)
    :param z: chunk z index within the region
    """
    # This function fails for an empty file. If that is the case, just return.
    if self.size < 2*SECTOR_LENGTH:
        return
    # zero the region header for the chunk (offset length and time)
    # ">IB" packs 5 bytes; [1:] keeps the last 4 zero bytes, which overwrite
    # the 3-byte sector offset + 1-byte sector count of this chunk's entry.
    self.file.seek(4 * (x + 32*z))
    self.file.write(pack(">IB", 0, 0)[1:])
    # Zero the 4-byte timestamp in the second header sector.
    self.file.seek(SECTOR_LENGTH + 4 * (x + 32*z))
    self.file.write(pack(">I", 0))
    # Check if file should be truncated:
    current = self.metadata[x, z]
    # Free-sector map, treating this chunk's sectors as free already.
    free_sectors = self._locate_free_sectors(ignore_chunk=current)
    # Number of trailing free sectors that can be cut off the end of the file.
    truncate_count = list(reversed(free_sectors)).index(False)
    if truncate_count > 0:
        self.size = SECTOR_LENGTH * (len(free_sectors) - truncate_count)
        self.file.truncate(self.size)
        free_sectors = free_sectors[:-truncate_count]
    # Calculate freed sectors
    # Zero any of this chunk's sectors that remain inside the (possibly
    # shortened) file so stale data is not left behind.
    for s in range(current.blockstart, min(current.blockstart + current.blocklength, len(free_sectors))):
        if free_sectors[s]:
            # zero sector s
            self.file.seek(SECTOR_LENGTH*s)
            self.file.write(SECTOR_LENGTH*b'\x00')
    # update the header
    self.metadata[x, z] = ChunkMetadata(x, z)
Fragmentation is not a problem, chunks are written to free sectors when possible. |
def lpc(x, N=None):
    r"""Linear Predictor Coefficients.
    :param x:
    :param int N: default is length(X) - 1
    :Details:
    Finds the coefficients :math:`A=(1, a(2), \dots a(N+1))`, of an Nth order
    forward linear predictor that predicts the current value value of the
    real-valued time series x based on past samples:
    .. math:: \hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)
    such that the sum of the squares of the errors
    .. math:: err(n) = X(n) - Xp(n)
    is minimized. This function uses the Levinson-Durbin recursion to
    solve the normal equations that arise from the least-squares formulation.
    .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`
    .. todo:: matrix case, references
    :Example:
    ::
    from scipy.signal import lfilter
    noise = randn(50000,1); % Normalized white Gaussian noise
    x = filter([1], [1 1/2 1/3 1/4], noise)
    x = x[45904:50000]
    x.reshape(4096, 1)
    x = x[0]
    Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:
    1.00000 + 0.00000i 0.51711 - 0.00000i 0.33908 - 0.00000i 0.24410 - 0.00000i
    ::
    a = lpc(x, 3)
    est_x = lfilter([0 -a(2:end)],1,x); % Estimated signal
    e = x - est_x; % Prediction error
    [acs,lags] = xcorr(e,'coeff'); % ACS of prediction error
    """
    # NOTE(review): docstring made a raw string so \d and \h in the math
    # markup are not treated as (invalid) escape sequences.
    m = len(x)
    if N is None:
        N = m - 1 #default value if N is not provided
    elif N > m-1:
        #disp('Warning: zero-padding short input sequence')
        # NOTE(review): in-place resize; m is intentionally NOT updated, so
        # the normalisation below still uses the original length.
        x.resize(N+1)
        #todo: check this zero-padding.
    # Autocorrelation via the FFT (Wiener-Khinchin); padding to the next
    # power of two >= 2*len(x)-1 avoids circular-correlation wrap-around.
    X = fft(x, 2**nextpow2(2.*len(x)-1))
    R = real(ifft(abs(X)**2))
    R = R/(m-1.) #Biased autocorrelation estimate
    # Solve the Yule-Walker normal equations with the Levinson-Durbin recursion.
    a, e, ref = LEVINSON(R, N)
    return a, e
:param x:
:param int N: default is length(X) - 1
:Details:
Finds the coefficients :math:`A=(1, a(2), \dots a(N+1))`, of an Nth order
forward linear predictor that predicts the current value value of the
real-valued time series x based on past samples:
.. math:: \hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)
such that the sum of the squares of the errors
.. math:: err(n) = X(n) - Xp(n)
is minimized. This function uses the Levinson-Durbin recursion to
solve the normal equations that arise from the least-squares formulation.
.. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`
.. todo:: matrix case, references
:Example:
::
from scipy.signal import lfilter
noise = randn(50000,1); % Normalized white Gaussian noise
x = filter([1], [1 1/2 1/3 1/4], noise)
x = x[45904:50000]
x.reshape(4096, 1)
x = x[0]
Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:
1.00000 + 0.00000i 0.51711 - 0.00000i 0.33908 - 0.00000i 0.24410 - 0.00000i
::
a = lpc(x, 3)
est_x = lfilter([0 -a(2:end)],1,x); % Estimated signal
e = x - est_x; % Prediction error
[acs,lags] = xcorr(e,'coeff'); % ACS of prediction error |
def head(self, url):
    '''Perform an HTTP HEAD request against ``url``.

    Typically used for status code retrieval and similar metadata checks.
    '''
    bot.debug('HEAD %s' % url)
    response = self._call(url, func=requests.head)
    return response
def get_transition(self, input_symbol, state):
    '''Return the (action, next state) pair for input_symbol in state.

    This lookup has no side effects on the FSM state. It is normally
    invoked by process() rather than called directly.

    Lookup order, most specific first:

    1. An exact (input_symbol, state) entry in state_transitions[].
    2. A state-only entry in state_transitions_any[] -- the state with
       ANY input symbol.
    3. The default_transition, if defined; this catches any input symbol
       in any state and serves as a handler for errors, undefined states,
       or defaults.
    4. Otherwise no transition exists and ExceptionFSM is raised.
    '''
    key = (input_symbol, state)
    if key in self.state_transitions:
        return self.state_transitions[key]
    if state in self.state_transitions_any:
        return self.state_transitions_any[state]
    if self.default_transition is not None:
        return self.default_transition
    raise ExceptionFSM('Transition is undefined: (%s, %s).' %
                       (str(input_symbol), str(state)))
This does not modify the FSM state, so calling this method has no side
effects. Normally you do not call this method directly. It is called by
process().
The sequence of steps to check for a defined transition goes from the
most specific to the least specific.
1. Check state_transitions[] that match exactly the tuple,
(input_symbol, state)
2. Check state_transitions_any[] that match (state)
In other words, match a specific state and ANY input_symbol.
3. Check if the default_transition is defined.
This catches any input_symbol and any state.
This is a handler for errors, undefined states, or defaults.
4. No transition was defined. If we get here then raise an exception. |
def solidangle_errorprop(twotheta, dtwotheta, sampletodetectordistance, dsampletodetectordistance, pixelsize=None):
    """Solid-angle correction for two-dimensional SAS images with error propagation
    Inputs:
        twotheta: matrix of two-theta values
        dtwotheta: matrix of absolute error of two-theta values
        sampletodetectordistance: sample-to-detector distance
        dsampletodetectordistance: absolute error of sample-to-detector distance
        pixelsize: (optional) detector pixel size; when None, a size of 1 is
            used for the error term (the correction factor itself is computed
            by `solidangle`, which receives the original None).
    Outputs two matrices of the same shape as twotheta. The scattering intensity
    matrix should be multiplied by the first one. The second one is the propagated
    error of the first one.
    """
    # Correction factor from the companion `solidangle` function; presumably
    # it handles pixelsize=None itself -- confirm against its definition.
    SAC = solidangle(twotheta, sampletodetectordistance, pixelsize)
    if pixelsize is None:
        pixelsize = 1
    # Second element: first-order error propagation of SAC with respect to
    # sampletodetectordistance and twotheta.
    return (SAC,
            (sampletodetectordistance * (4 * dsampletodetectordistance ** 2 * np.cos(twotheta) ** 2 +
                                         9 * dtwotheta ** 2 * sampletodetectordistance ** 2 * np.sin(twotheta) ** 2) ** 0.5
             / np.cos(twotheta) ** 4) / pixelsize ** 2)
Inputs:
twotheta: matrix of two-theta values
dtwotheta: matrix of absolute error of two-theta values
sampletodetectordistance: sample-to-detector distance
dsampletodetectordistance: absolute error of sample-to-detector distance
Outputs two matrices of the same shape as twotheta. The scattering intensity
matrix should be multiplied by the first one. The second one is the propagated
error of the first one. |
def data(self):
    """
    Return a dictionary containing all the passed data plus an
    ``error_list`` item holding the result of :attr:`error_list`.
    """
    payload = {'error_list': self.error_list}
    # Parent data is applied last so it keeps precedence, as before.
    payload.update(super(ValidationErrors, self).data)
    return payload
``error_list`` which holds the result of :attr:`error_list`. |
def areObservableElements(self, elementNames):
    """
    Tell whether every given element name is an observable element.

    :param elementNames: sequence of element names to evaluate
    :return: True if all are observable elements, otherwise False.
    :rtype: bool
    :raises TypeError: if elementNames is not a sized container.
    """
    if not hasattr(elementNames, "__len__"):
        raise TypeError(
            "Element name should be a array of strings." +
            "I receive this {0}"
            .format(elementNames))
    return self._evaluateArray(elementNames)
:param str ElementName: the element name to evaluate
:return: true if is an observable element, otherwise false.
:rtype: bool |
def update(self, personId, emails=None, displayName=None, firstName=None,
           lastName=None, avatar=None, orgId=None, roles=None,
           licenses=None, **request_parameters):
    """Update details for a person, by ID.

    Only an admin can update a person's details. Email addresses for a
    person cannot be changed via the Webex Teams API.

    Include all details for the person: this action expects all user
    details to be present in the request. A common approach is to first
    GET the person's details, make changes, then PUT both the changed
    and unchanged values.

    Args:
        personId(basestring): The person ID.
        emails(`list`): Email address(es) of the person (list of strings).
        displayName(basestring): Full name of the person.
        firstName(basestring): First name of the person.
        lastName(basestring): Last name of the person.
        avatar(basestring): URL to the person's avatar in PNG format.
        orgId(basestring): ID of the organization to which this
            person belongs.
        roles(`list`): Roles of the person (list of strings containing
            the role IDs to be assigned to the person).
        licenses(`list`): Licenses allocated to the person (list of
            strings - containing the license IDs to be allocated to the
            person).
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Person: A Person object with the updated details.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    # Validate argument types before building the request payload.
    for value, expected_type in ((emails, list),
                                 (displayName, basestring),
                                 (firstName, basestring),
                                 (lastName, basestring),
                                 (avatar, basestring),
                                 (orgId, basestring),
                                 (roles, list),
                                 (licenses, list)):
        check_type(value, expected_type)
    # Only parameters that actually carry a value end up in the payload.
    put_data = dict_from_items_with_values(
        request_parameters,
        emails=emails,
        displayName=displayName,
        firstName=firstName,
        lastName=lastName,
        avatar=avatar,
        orgId=orgId,
        roles=roles,
        licenses=licenses,
    )
    # API request
    json_data = self._session.put(API_ENDPOINT + '/' + personId,
                                  json=put_data)
    # Return a person object created from the returned JSON object
    return self._object_factory(OBJECT_TYPE, json_data)
Only an admin can update a person's details.
Email addresses for a person cannot be changed via the Webex Teams API.
Include all details for the person. This action expects all user
details to be present in the request. A common approach is to first GET
the person's details, make changes, then PUT both the changed and
unchanged values.
Args:
personId(basestring): The person ID.
emails(`list`): Email address(es) of the person (list of strings).
displayName(basestring): Full name of the person.
firstName(basestring): First name of the person.
lastName(basestring): Last name of the person.
avatar(basestring): URL to the person's avatar in PNG format.
orgId(basestring): ID of the organization to which this
person belongs.
roles(`list`): Roles of the person (list of strings containing
the role IDs to be assigned to the person).
licenses(`list`): Licenses allocated to the person (list of
strings - containing the license IDs to be allocated to the
person).
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
Person: A Person object with the updated details.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. |
def accel_decrease_height(self, *args):
    """Keyboard-accelerator callback: shrink the window height by 2, floored at 0."""
    general = self.settings.general
    current = general.get_int('window-height')
    general.set_int('window-height', max(current - 2, 0))
    return True
def get_cfn_parameters(self):
    """Return a dictionary of variables with `type` :class:`CFNType`.

    Returns:
        dict: variables that need to be submitted as CloudFormation
        Parameters, keyed by variable name with their parameter values.
    """
    # Only variables exposing to_parameter_value() are CFN parameters.
    return {
        name: var.to_parameter_value()
        for name, var in self.get_variables().items()
        if hasattr(var, "to_parameter_value")
    }
Returns:
dict: variables that need to be submitted as CloudFormation
Parameters. |
def FileHeader(self):
    """Return the per-file (local) header as a byte string.

    Builds the ZIP local file header for this entry: DOS date/time,
    CRC/sizes (zeroed when the data-descriptor flag 0x08 is set, since
    they are written after the file data), and a ZIP64 extra field when
    either size exceeds the 4-byte limit.
    """
    dt = self.date_time
    # Pack (year, month, day) / (hour, minute, second) into MS-DOS format;
    # DOS time has 2-second resolution, hence the // 2.
    dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
    dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
    if self.flag_bits & 0x08:
        # Set these to zero because we write them after the file data
        CRC = compress_size = file_size = 0
    else:
        CRC = self.CRC
        compress_size = self.compress_size
        file_size = self.file_size
    extra = self.extra
    if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
        # File is larger than what fits into a 4 byte integer,
        # fall back to the ZIP64 extension
        fmt = '<HHQQ'
        extra = extra + struct.pack(fmt,
                                    1, struct.calcsize(fmt)-4,
                                    file_size, compress_size)
        file_size = 0xffffffff
        compress_size = 0xffffffff
        self.extract_version = max(45, self.extract_version)
        # BUG FIX: the original raised create_version to
        # max(45, self.extract_version) -- a copy-paste slip that clobbered
        # create_version with the extract version. ZIP64 requires version 4.5
        # for BOTH fields independently (cf. CPython's zipfile module).
        self.create_version = max(45, self.create_version)
    filename, flag_bits = self._encodeFilenameFlags()
    header = struct.pack(structFileHeader, stringFileHeader,
                         self.extract_version, self.reserved, flag_bits,
                         self.compress_type, dostime, dosdate, CRC,
                         compress_size, file_size,
                         len(filename), len(extra))
    return header + filename + extra
def irelay(gen, thru):
    """Create a new generator by relaying yield/send interactions
    through another generator
    Parameters
    ----------
    gen: Generable[T_yield, T_send, T_return]
        the original generator
    thru: ~typing.Callable[[T_yield], ~typing.Generator]
        the generator callable through which each interaction is relayed
    Returns
    -------
    ~typing.Generator
        the relayed generator
    """
    gen = iter(gen)
    # The relay protocol only works if the inner generator has not yet been
    # advanced (nothing consumed before we start forwarding sends).
    assert _is_just_started(gen)
    # `yield_from` is a project helper: presumably an iterable wrapper that
    # exposes send()/result and whose context manager handles StopIteration
    # bookkeeping -- confirm against its definition.
    yielder = yield_from(gen)
    for item in yielder:
        with yielder:
            # Each item from the outer generator is routed through a fresh
            # sub-generator built by `thru`.
            subgen = thru(item)
            subyielder = yield_from(subgen)
            for subitem in subyielder:
                with subyielder:
                    # Forward whatever the caller sends us into the
                    # sub-generator, yielding its items outward.
                    subyielder.send((yield subitem))
            # The sub-generator's return value becomes the value sent back
            # into the outer generator.
            yielder.send(subyielder.result)
    # `return_` is a project helper for returning a value from a generator.
    return_(yielder.result)
through another generator
Parameters
----------
gen: Generable[T_yield, T_send, T_return]
the original generator
thru: ~typing.Callable[[T_yield], ~typing.Generator]
the generator callable through which each interaction is relayed
Returns
-------
~typing.Generator
the relayed generator |
def parse_args(args=None):
    """Parse command line arguments and return a dictionary of options
    for ttfautohint.ttfautohint function.
    `args` can be either None, a list of strings, or a single string,
    that is split into individual options with `shlex.split`.
    When `args` is None, the console's default sys.argv are used, and any
    SystemExit exceptions raised by argparse are propagated.
    If args is a string list or a string, it is assumed that the function
    was not called from a console script's `main` entry point, but from
    other client code, and thus the SystemExit exceptions are muted and
    a `None` value is returned.
    """
    import argparse
    from ttfautohint import __version__, libttfautohint
    from ttfautohint.cli import USAGE, DESCRIPTION, EPILOG
    version_string = "ttfautohint-py %s (libttfautohint %s)" % (
        __version__, libttfautohint.version_string)
    # args is None => invoked from the console entry point: let argparse's
    # SystemExit propagate. Otherwise we are library code: capture it.
    if args is None:
        capture_sys_exit = False
    else:
        capture_sys_exit = True
        if isinstance(args, basestring):
            import shlex
            args = shlex.split(args)
    parser = argparse.ArgumentParser(
        prog="ttfautohint",
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Positional input/output; "-" means stdin/stdout.
    parser.add_argument(
        "in_file", nargs="?", metavar="IN-FILE", default="-",
        type=stdin_or_input_path_type,
        help="input file (default: standard input)")
    parser.add_argument(
        "out_file", nargs="?", metavar="OUT-FILE", default="-",
        type=stdout_or_output_path_type,
        help="output file (default: standard output)")
    parser.add_argument(
        "--debug", action="store_true", help="print debugging information")
    # -a and the deprecated -w are mutually exclusive ways to set stem widths.
    stem_width_group = parser.add_mutually_exclusive_group(required=False)
    stem_width_group.add_argument(
        "-a", "--stem-width-mode", type=stem_width_mode, metavar="S",
        default=STEM_WIDTH_MODE_OPTIONS,
        help=("select stem width mode for grayscale, GDI ClearType, and DW "
              "ClearType, where S is a string of three letters with possible "
              "values 'n' for natural, 'q' for quantized, and 's' for strong "
              "(default: qsq)"))
    stem_width_group.add_argument(  # deprecated
        "-w", "--strong-stem-width", type=strong_stem_width, metavar="S",
        help=argparse.SUPPRESS)
    parser.add_argument(
        "-c", "--composites", dest="hint_composites", action="store_true",
        help="hint glyph composites also")
    parser.add_argument(
        "-d", "--dehint", action="store_true", help="remove all hints")
    parser.add_argument(
        "-D", "--default-script", metavar="SCRIPT",
        default=USER_OPTIONS["default_script"],
        help="set default OpenType script (default: %(default)s)")
    parser.add_argument(
        "-f", "--fallback-script", metavar="SCRIPT",
        default=USER_OPTIONS["fallback_script"],
        help="set fallback script (default: %(default)s)")
    parser.add_argument(
        "-F", "--family-suffix", metavar="SUFFIX",
        help="append SUFFIX to the family name string(s) in the `name' table")
    parser.add_argument(
        "-G", "--hinting-limit", type=int, metavar="PPEM",
        default=USER_OPTIONS["hinting_limit"],
        help=("switch off hinting above this PPEM value (default: "
              "%(default)s); value 0 means no limit"))
    parser.add_argument(
        "-H", "--fallback-stem-width", type=int, metavar="UNITS",
        default=USER_OPTIONS["fallback_stem_width"],
        help=("set fallback stem width (default: %(default)s font units at "
              "2048 UPEM)"))
    parser.add_argument(
        "-i", "--ignore-restrictions", action="store_true",
        help="override font license restrictions")
    parser.add_argument(
        "-I", "--detailed-info", action="store_true",
        help=("add detailed ttfautohint info to the version string(s) in "
              "the `name' table"))
    parser.add_argument(
        "-l", "--hinting-range-min", type=int, metavar="PPEM",
        default=USER_OPTIONS["hinting_range_min"],
        help="the minimum PPEM value for hint sets (default: %(default)s)")
    parser.add_argument(
        "-m", "--control-file", metavar="FILE",
        help="get control instructions from FILE")
    parser.add_argument(
        "-n", "--no-info", action="store_true",
        help=("don't add ttfautohint info to the version string(s) in the "
              "`name' table"))
    parser.add_argument(
        "-p", "--adjust-subglyphs", action="store_true",
        help="handle subglyph adjustments in exotic fonts")
    parser.add_argument(
        "-r", "--hinting-range-max", type=int, metavar="PPEM",
        default=USER_OPTIONS["hinting_range_max"],
        help="the maximum PPEM value for hint sets (default: %(default)s)")
    parser.add_argument(
        "-R", "--reference", dest="reference_file", metavar="FILE",
        help="derive blue zones from reference font FILE")
    parser.add_argument(
        "-s", "--symbol", action="store_true",
        help="input is symbol font")
    parser.add_argument(
        "-S", "--fallback-scaling", action="store_true",
        help="use fallback scaling, not hinting")
    parser.add_argument(
        "-t", "--ttfa-table", action="store_true", dest="TTFA_info",
        help="add TTFA information table")
    parser.add_argument(
        "-T", "--ttfa-info", dest="show_TTFA_info", action="store_true",
        help="display TTFA table in IN-FILE and exit")
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="show progress information")
    parser.add_argument(
        "-V", "--version", action="version",
        version=version_string,
        help="print version information and exit")
    parser.add_argument(
        "-W", "--windows-compatibility", action="store_true",
        help=("add blue zones for `usWinAscent' and `usWinDescent' to avoid "
              "clipping"))
    parser.add_argument(
        "-x", "--increase-x-height", type=int, metavar="PPEM",
        default=USER_OPTIONS["increase_x_height"],
        help=("increase x height for sizes in the range 6<=PPEM<=N; value "
              "0 switches off this feature (default: %(default)s)"))
    parser.add_argument(
        "-X", "--x-height-snapping-exceptions", metavar="STRING",
        default=USER_OPTIONS["x_height_snapping_exceptions"],
        help=('specify a comma-separated list of x-height snapping exceptions'
              ', for example "-9, 13-17, 19" (default: "%(default)s")'))
    parser.add_argument(
        "-Z", "--reference-index", type=int, metavar="NUMBER",
        default=USER_OPTIONS["reference_index"],
        help="face index of reference font (default: %(default)s)")
    try:
        options = vars(parser.parse_args(args))
    except SystemExit:
        # Library use: swallow argparse's exit and signal failure with None.
        if capture_sys_exit:
            return None
        raise
    # if either input/output are interactive, print help and exit
    if (not capture_sys_exit and
            (options["in_file"] is None or options["out_file"] is None)):
        parser.print_help()
        parser.exit(1)
    # check SOURCE_DATE_EPOCH environment variable
    source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if source_date_epoch:
        try:
            options["epoch"] = int(source_date_epoch)
        except ValueError:
            import warnings
            warnings.warn(
                UserWarning("invalid SOURCE_DATE_EPOCH: %r" % source_date_epoch))
    if options.pop("show_TTFA_info"):
        # TODO use fonttools to dump TTFA table?
        raise NotImplementedError()
    # Fold the (possibly deprecated) stem-width options into the result dict.
    stem_width_options = options.pop("stem_width_mode")
    strong_stem_width_options = options.pop("strong_stem_width")
    if strong_stem_width_options:
        import warnings
        warnings.warn(
            UserWarning("Option '-w' is deprecated! Use option '-a' instead"))
        stem_width_options = strong_stem_width_options
    options.update(stem_width_options)
    return options
for ttfautohint.ttfautohint function.
`args` can be either None, a list of strings, or a single string,
that is split into individual options with `shlex.split`.
When `args` is None, the console's default sys.argv are used, and any
SystemExit exceptions raised by argparse are propagated.
If args is a string list or a string, it is assumed that the function
was not called from a console script's `main` entry point, but from
other client code, and thus the SystemExit exceptions are muted and
a `None` value is returned. |
def read_mutating_webhook_configuration(self, name, **kwargs):  # noqa: E501
    """read_mutating_webhook_configuration  # noqa: E501
    read the specified MutatingWebhookConfiguration  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the MutatingWebhookConfiguration (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1MutatingWebhookConfiguration
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and async_req paths delegate unchanged: the
    # helper returns the data directly or the request thread respectively.
    return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs)  # noqa: E501
read the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread. |
def db_create_table(self, table_name, columns):
    """Create a temporary DB table.

    Arguments:
        table_name (str): The name of the table.
        columns (list): List of columns to add to the DB; duplicates are
            dropped and surrounding quotes stripped.
    """
    # Declare every distinct column as a quoted text column.
    column_defs = ', '.join(
        '"{}" text'.format(col.strip('"').strip("'")) for col in set(columns))
    create_table_sql = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(
        table_name, column_defs
    )
    try:
        cursor = self.db_conn.cursor()
        cursor.execute(create_table_sql)
    except sqlite3.Error as e:
        self.handle_error(e)
Arguments:
table_name (str): The name of the table.
columns (list): List of columns to add to the DB. |
def sync_to_db_from_config(
        cls,
        druid_config,
        user,
        cluster,
        refresh=True):
    """Merges the ds config from druid_config into one stored in the db.

    :param druid_config: Druid datasource config dict; must provide the
        keys 'name', 'dimensions' and 'metrics_spec'.
    :param user: application user that becomes owner/creator of a newly
        created datasource.
    :param cluster: Druid cluster the datasource belongs to.
    :param refresh: when False and the datasource already exists, return
        without syncing columns and metrics.
    """
    session = db.session
    # Look up an existing datasource record by name.
    datasource = (
        session.query(cls)
        .filter_by(datasource_name=druid_config['name'])
        .first()
    )
    # Create a new datasource.
    if not datasource:
        datasource = cls(
            datasource_name=druid_config['name'],
            cluster=cluster,
            owners=[user],
            changed_by_fk=user.id,
            created_by_fk=user.id,
        )
        session.add(datasource)
    elif not refresh:
        return
    dimensions = druid_config['dimensions']
    # Fetch the column rows that already exist for these dimensions and
    # index them by name so we only create the missing ones.
    col_objs = (
        session.query(DruidColumn)
        .filter(DruidColumn.datasource_id == datasource.id)
        .filter(DruidColumn.column_name.in_(dimensions))
    )
    col_objs = {col.column_name: col for col in col_objs}
    for dim in dimensions:
        col_obj = col_objs.get(dim, None)
        if not col_obj:
            col_obj = DruidColumn(
                datasource_id=datasource.id,
                column_name=dim,
                groupby=True,
                filterable=True,
                # TODO: fetch type from Hive.
                type='STRING',
                datasource=datasource,
            )
            session.add(col_obj)
    # Import Druid metrics
    metric_objs = (
        session.query(DruidMetric)
        .filter(DruidMetric.datasource_id == datasource.id)
        .filter(DruidMetric.metric_name.in_(
            spec['name'] for spec in druid_config['metrics_spec']
        ))
    )
    metric_objs = {metric.metric_name: metric for metric in metric_objs}
    for metric_spec in druid_config['metrics_spec']:
        metric_name = metric_spec['name']
        metric_type = metric_spec['type']
        metric_json = json.dumps(metric_spec)
        # Plain Druid 'count' metrics are stored as a longSum aggregation
        # over the same field.
        if metric_type == 'count':
            metric_type = 'longSum'
            metric_json = json.dumps({
                'type': 'longSum',
                'name': metric_name,
                'fieldName': metric_name,
            })
        metric_obj = metric_objs.get(metric_name, None)
        if not metric_obj:
            metric_obj = DruidMetric(
                metric_name=metric_name,
                metric_type=metric_type,
                verbose_name='%s(%s)' % (metric_type, metric_name),
                datasource=datasource,
                json=metric_json,
                description=(
                    'Imported from the airolap config dir for %s' %
                    druid_config['name']),
            )
            session.add(metric_obj)
    session.commit()
def _calculate(self, field):
'''
We want to avoid trouble, so if the field is not enclosed by any other field,
we just return 0.
'''
encloser = field.enclosing
if encloser:
rendered = encloser.get_rendered_fields(RenderContext(self))
if field not in rendered:
value = len(rendered)
else:
value = rendered.index(field)
else:
value = 0
return value | We want to avoid trouble, so if the field is not enclosed by any other field,
we just return 0. |
def _ExtractGoogleSearchQuery(self, url):
"""Extracts a search query from a Google URL.
Google Drive: https://drive.google.com/drive/search?q=query
Google Search: https://www.google.com/search?q=query
Google Sites: https://sites.google.com/site/.*/system/app/pages/
search?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'search' not in url or 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ') | Extracts a search query from a Google URL.
Google Drive: https://drive.google.com/drive/search?q=query
Google Search: https://www.google.com/search?q=query
Google Sites: https://sites.google.com/site/.*/system/app/pages/
search?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found. |
def _generate_journal_nested_queries(self, value):
    """Generates ElasticSearch nested query(s).

    Args:
        value (string): Contains the journal_title, journal_volume and artid or start_page separated by a comma.
            This value should be of type string.

    Notes:
        The value contains at least one of the 3 mentioned items, in this order and at most 3.
        The 3rd is either the artid or the page_start and it will query the corresponding ES field for this item.
        The values are then split on comma and stripped of spaces before being saved in a values list in order to
        be assigned to corresponding fields.
    """
    # Abstract away which is the third field, we care only for its existence.
    third_journal_field = ElasticSearchVisitor.JOURNAL_PAGE_START
    new_publication_info = ElasticSearchVisitor._preprocess_journal_query_value(third_journal_field, value)
    # We always expect a journal title, otherwise query would be considered malformed, and thus this method would
    # not have been called.
    queries_for_each_field = [
        generate_match_query(ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_TITLE],
                             new_publication_info[ElasticSearchVisitor.JOURNAL_TITLE],
                             with_operator_and=False)
    ]
    if ElasticSearchVisitor.JOURNAL_VOLUME in new_publication_info:
        queries_for_each_field.append(
            generate_match_query(
                ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[ElasticSearchVisitor.JOURNAL_VOLUME],
                new_publication_info[ElasticSearchVisitor.JOURNAL_VOLUME],
                with_operator_and=False
            )
        )
    if third_journal_field in new_publication_info:
        artid_or_page_start = new_publication_info[third_journal_field]
        # The third token may match either the page-start or the artid ES
        # field, so build a query for both and OR them (should-clause).
        match_queries = [
            generate_match_query(
                ElasticSearchVisitor.JOURNAL_FIELDS_MAPPING[third_field],
                artid_or_page_start,
                with_operator_and=False
            )
            for third_field
            in (ElasticSearchVisitor.JOURNAL_PAGE_START, ElasticSearchVisitor.JOURNAL_ART_ID)
        ]
        queries_for_each_field.append(
            wrap_queries_in_bool_clauses_if_more_than_one(match_queries, use_must_clause=False)
        )
    # All per-field queries must match (must-clause) within the nested scope.
    return generate_nested_query(
        ElasticSearchVisitor.JOURNAL_FIELDS_PREFIX,
        wrap_queries_in_bool_clauses_if_more_than_one(queries_for_each_field, use_must_clause=True)
    )
Args:
value (string): Contains the journal_title, journal_volume and artid or start_page separated by a comma.
This value should be of type string.
Notes:
The value contains at least one of the 3 mentioned items, in this order and at most 3.
The 3rd is either the artid or the page_start and it will query the corresponding ES field for this item.
The values are then split on comma and stripped of spaces before being saved in a values list in order to
be assigned to corresponding fields. |
def hpcluster(self, data: ['SASdata', str] = None,
              freq: str = None,
              id: [str, list] = None,
              input: [str, list, dict] = None,
              score: [str, bool, 'SASdata'] = True,
              procopts: str = None,
              stmtpassthrough: str = None,
              **kwargs: dict) -> 'SASresults':
    """
    Python method to call the HPCLUS procedure.

    Documentation link:
    https://go.documentation.sas.com/?docsetId=emhpprcref&docsetTarget=emhpprcref_hpclus_toc.htm&docsetVersion=14.2&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param freq: The freq variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required.
    :param score: The score variable can only be a string type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
Documentation link:
https://go.documentation.sas.com/?docsetId=emhpprcref&docsetTarget=emhpprcref_hpclus_toc.htm&docsetVersion=14.2&locale=en
:param data: SASdata object or string. This parameter is required.
:param freq: The freq variable can only be a string type.
:param id: The id variable can only be a string type.
:param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required
:param score: The score variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object |
def _inner(self, x1, x2):
"""Raw inner product of two elements."""
return self.tspace._inner(x1.tensor, x2.tensor) | Raw inner product of two elements. |
def get_encrypted_reply(self, message):
    """Get the handlers' reply for a WeRoBot message as rendered text.

    The reply is encrypted when encryption is enabled; otherwise it is
    rendered as plain text.

    :param message: a WeRoBot Message instance.
    :return: the reply text; empty string when no handler responded.
    """
    reply = self.get_reply(message)
    if not reply:
        self.logger.warning("No handler responded message %s" % message)
        return ''
    if not self.use_encryption:
        return reply.render()
    return self.crypto.encrypt_message(reply)
如果可能,对该 Reply 进行加密。
返回 Reply Render 后的文本。
:param message: 一个 WeRoBot Message 实例。
:return: reply (纯文本) |
def _expand_list(names):
    """ Do a wildchar name expansion of object names in a list and return expanded list.

    The items are expected to exist as this is used for copy sources or delete targets.
    Currently we support wildchars in the key name only.
    """
    # NOTE(review): Python 2 code (uses ``basestring``).
    if names is None:
        names = []
    elif isinstance(names, basestring):
        names = [names]
    results = []  # The expanded list.
    items = {}  # Cached contents of buckets; used for matching.
    for name in names:
        bucket, key = datalab.storage._bucket.parse_name(name)
        results_len = len(results)  # If we fail to add any we add name and let caller deal with it.
        if bucket:
            if not key:
                # Just a bucket; add it.
                results.append('gs://%s' % bucket)
            elif datalab.storage.Item(bucket, key).exists():
                results.append('gs://%s/%s' % (bucket, key))
            else:
                # Expand possible key values.
                if bucket not in items and key[:1] == '*':
                    # We need the full list; cache a copy for efficiency.
                    items[bucket] = [item.metadata.name
                                     for item in list(datalab.storage.Bucket(bucket).items())]
                # If we have a cached copy use it
                if bucket in items:
                    candidates = items[bucket]
                # else we have no cached copy but can use prefix matching which is more efficient than
                # getting the full contents.
                else:
                    # Get the non-wildchar prefix.
                    match = re.search('\?|\*|\[', key)
                    prefix = key
                    if match:
                        prefix = key[0:match.start()]
                    candidates = [item.metadata.name
                                  for item in datalab.storage.Bucket(bucket).items(prefix=prefix)]
                for item in candidates:
                    if fnmatch.fnmatch(item, key):
                        results.append('gs://%s/%s' % (bucket, item))
        # If we added no matches, add the original name and let caller deal with it.
        if len(results) == results_len:
            results.append(name)
    return results
The items are expected to exist as this is used for copy sources or delete targets.
Currently we support wildchars in the key name only. |
def reverse_readfile(filename):
    """
    A much faster reverse read of file by using Python's mmap to generate a
    memory-mapped file. It is slower for very small files than
    reverse_readline, but at least 2x faster for large files (the primary use
    of such a method).

    Args:
        filename (str):
            Name of file to read.

    Yields:
        Lines from the file in reverse order.
    """
    try:
        # zopen presumably picks gzip/bz2/plain based on extension — confirm.
        with zopen(filename, "rb") as f:
            if isinstance(f, gzip.GzipFile) or isinstance(f, bz2.BZ2File):
                # Compressed streams cannot be mmapped; read fully instead.
                for l in reversed(f.readlines()):
                    yield l.decode("utf-8").rstrip()
            else:
                fm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                n = len(fm)
                while n > 0:
                    # Scan backwards for the previous newline; yield the text
                    # between it and the current end position.
                    i = fm.rfind(b"\n", 0, n)
                    yield fm[i + 1:n].decode("utf-8").strip("\n")
                    n = i
    except ValueError:
        # NOTE(review): presumably guards mmap of an empty file, which
        # raises ValueError — treat it as "no lines".
        return
memory-mapped file. It is slower for very small files than
reverse_readline, but at least 2x faster for large files (the primary use
of such a method).
Args:
filename (str):
Name of file to read.
Yields:
Lines from the file in reverse order. |
def strduration_long(duration, do_translate=True):
    """Turn a time value in seconds into x hours, x minutes, etc.

    :param duration: time span in seconds; may be negative or fractional.
    :param do_translate: when True, pipe unit strings through the
        module-level gettext translators ``_`` and ``_n``.
    :return: human-readable duration string (at most one unit is dropped
        from the tail when more than two were collected).
    """
    if do_translate:
        # Read (never assign) the module-level translator functions.
        translate = _
        translate_n = _n
    else:
        # Identity "translators".  BUGFIX: the old code declared
        # ``global _, _n`` — a declaration that covers the *entire*
        # function body — so this branch silently replaced the module's
        # real translators as a side effect of any untranslated call.
        translate = lambda x: x
        translate_n = lambda singular, plural, n: singular if n == 1 else plural
    if duration < 0:
        duration = abs(duration)
        prefix = "-"
    else:
        prefix = ""
    if duration < 1:
        return translate("%(prefix)s%(duration).02f seconds") % \
            {"prefix": prefix, "duration": duration}
    # Translation dummies: literal _n(...) calls so message-extraction
    # tools register the singular/plural pairs used below.
    if do_translate:
        _n("%d second", "%d seconds", 1)
        _n("%d minute", "%d minutes", 1)
        _n("%d hour", "%d hours", 1)
        _n("%d day", "%d days", 1)
        _n("%d year", "%d years", 1)
    cutoffs = [
        (60, "%d second", "%d seconds"),
        (60, "%d minute", "%d minutes"),
        (24, "%d hour", "%d hours"),
        (365, "%d day", "%d days"),
        (None, "%d year", "%d years"),
    ]
    time_str = []
    for divisor, single, plural in cutoffs:
        if duration < 1:
            break
        if divisor is None:
            # Last bucket: everything left counts as years.
            duration, unit = 0, duration
        else:
            duration, unit = divmod(duration, divisor)
        if unit:
            time_str.append(translate_n(single, plural, unit) % unit)
    time_str.reverse()
    # Drop the least-significant unit when more than two were collected.
    if len(time_str) > 2:
        time_str.pop()
    return "%s%s" % (prefix, ", ".join(time_str))
def preview(self, components=None, ask=0):
    """
    Inspects differences between the last deployment and the current code state.

    :param components: optional subset of components to inspect.
    :param ask: truthy to prompt before beginning deployment when the
        current host is the last one; exits the process if the user
        declines or nothing changed.
    """
    ask = int(ask)
    self.init()
    component_order, plan_funcs = self.get_component_funcs(components=components)
    print('\n%i changes found for host %s.\n' % (len(component_order), self.genv.host_string))
    if component_order and plan_funcs:
        if self.verbose:
            print('These components have changed:\n')
            for component in sorted(component_order):
                print((' '*4)+component)
        print('Deployment plan for host %s:\n' % self.genv.host_string)
        for func_name, _ in plan_funcs:
            print(success_str((' '*4)+func_name))
    if component_order:
        print()
    # Only prompt once, on the last host of the run.
    if ask and self.genv.host_string == self.genv.hosts[-1]:
        if component_order:
            # NOTE(review): Python 2 ``raw_input``.
            if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'):
                sys.exit(0)
        else:
            sys.exit(0)
def SetScrollPercent(self, horizontalPercent: float, verticalPercent: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:
    """
    Call IUIAutomationScrollPattern::SetScrollPercent.

    Set the horizontal and vertical scroll positions as a percentage of the
    total content area within the UI Automation element.

    horizontalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.
    verticalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.
    waitTime: float.
    Return bool, True if succeed otherwise False.

    Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-setscrollpercent
    """
    succeeded = self.pattern.SetScrollPercent(horizontalPercent, verticalPercent) == S_OK
    time.sleep(waitTime)
    return succeeded
Set the horizontal and vertical scroll positions as a percentage of the total content area within the UI Automation element.
horizontalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.
verticalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-setscrollpercent |
def _keep_this(self, name):
"""Return True if there are to be no modifications to name."""
for keep_name in self.keep:
if name == keep_name:
return True
return False | Return True if there are to be no modifications to name. |
def lt(self, key, value, includeMissing=False):
    '''Return entries where the key's value is less (<).

    Example of use:

    >>> test = [
    ...     {"name": "Jim",   "age": 18, "income": 93000, "wigs": 68       },
    ...     {"name": "Larry", "age": 18,                  "wigs": [3, 2, 9]},
    ...     {"name": "Joe",   "age": 20, "income": 15000, "wigs": [1, 2, 3]},
    ...     {"name": "Bill",  "age": 19, "income": 29000                   },
    ... ]
    >>> print PLOD(test).lt("age", 19).returnString()
    [
        {age: 18, income: 93000, name: 'Jim'  , wigs: 68},
        {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]}
    ]

    .. versionadded:: 0.1.1

    :param key:
        The dictionary key (or cascading list of keys) that should be the
        basis of comparison.
    :param value:
        The value to compare with.
    :param includeMissing:
        Defaults to False. If True, then entries missing the key are also
        included.
    :returns: self
    '''
    # Delegate to the shared selection helper using the LESS comparator;
    # it returns the filtered rows plus their original index positions.
    (self.table, self.index_track) = internal.select(self.table, self.index_track, key, self.LESS, value, includeMissing)
    return self
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).lt("age", 19).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]}
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self |
def head_object_async(self, path, **kwds):
    """Issue an asynchronous HEAD request for an object.

    Depending on request headers, HEAD returns various object properties,
    e.g. Content-Length, Last-Modified, and ETag.  No payload argument is
    supported.
    """
    url = self.api_url + path
    return self.do_request_async(url, 'HEAD', **kwds)
Depending on request headers, HEAD returns various object properties,
e.g. Content-Length, Last-Modified, and ETag.
Note: No payload argument is supported. |
def get(app, name):
    '''Get a backend given its name'''
    backend = get_all(app).get(name)
    if backend:
        return backend
    raise EntrypointError(
        'Harvest backend "{0}" is not registered'.format(name))
def _sync_enter(self):
"""
Helps to cut boilerplate on async context
managers that offer synchronous variants.
"""
if hasattr(self, 'loop'):
loop = self.loop
else:
loop = self._client.loop
if loop.is_running():
raise RuntimeError(
'You must use "async with" if the event loop '
'is running (i.e. you are inside an "async def")'
)
return loop.run_until_complete(self.__aenter__()) | Helps to cut boilerplate on async context
managers that offer synchronous variants. |
def iterscrapers(self, method, mode=None):
    """Iterate over all available scrapers.

    :param method: scraper category to look up in the ``discovered``
        registry for ``self.language``.
    :param mode: unused; kept for backward compatibility.
    :yields: each Scraper registered under the language/method pair.
    """
    global discovered
    # dict.has_key() was removed in Python 3; the ``in`` operator is
    # equivalent and works on both Python 2 and 3.
    if self.language in discovered and method in discovered[self.language]:
        for scraper in discovered[self.language][method]:
            yield scraper
def dftphotom(cfg):
    """Run the discrete-Fourier-transform photometry algorithm.

    See the module-level documentation and the output of ``casatask dftphotom
    --help`` for help. All of the algorithm configuration is specified in the
    *cfg* argument, which is an instance of :class:`Config`.
    """
    tb = util.tools.table()
    ms = util.tools.ms()
    me = util.tools.measures()

    # Read stuff in. Even if the weight values don't have their
    # absolute scale set correctly, we can still use them to set the
    # relative weighting of the data points.
    #
    # datacol is (ncorr, nchan, nchunk)
    # flag is (ncorr, nchan, nchunk)
    # weight is (ncorr, nchunk)
    # uvw is (3, nchunk)
    # time is (nchunk)
    # axis_info.corr_axis is (ncorr)
    # axis_info.freq_axis.chan_freq is (nchan, 1) [for now?]
    #
    # Note that we apply msselect() again when reading the data because
    # selectinit() is broken, but the invocation here is good because it
    # affects the results from ms.range() and friends.

    if ':' in (cfg.spw or ''):
        warn('it looks like you are attempting to select channels within one or more spws')
        warn('this is NOT IMPLEMENTED; I will average over the whole spw instead')

    ms.open(b(cfg.vis))
    totrows = ms.nrow()
    ms_sels = dict((n, cfg.get(n)) for n in util.msselect_keys
                   if cfg.get(n) is not None)
    ms.msselect(b(ms_sels))

    rangeinfo = ms.range(b'data_desc_id field_id'.split())
    ddids = rangeinfo['data_desc_id']
    fields = rangeinfo['field_id']
    colnames = [cfg.datacol] + 'flag weight time axis_info'.split()
    rephase = (cfg.rephase is not None)

    if fields.size != 1:
        # I feel comfortable making this a fatal error, even if we're
        # not rephasing.
        die('selected data should contain precisely one field; got %d', fields.size)

    if rephase:
        fieldid = fields[0]
        tb.open(b(os.path.join(cfg.vis, 'FIELD')))
        phdirinfo = tb.getcell(b'PHASE_DIR', fieldid)
        tb.close()

        if phdirinfo.shape[1] != 1:
            die('trying to rephase but target field (#%d) has a '
                'time-variable phase center, which I can\'t handle', fieldid)
        ra0, dec0 = phdirinfo[:,0] # in radians.

        # based on intflib/pwflux.py, which was copied from
        # hex/hex-lib-calcgainerr:

        dra = cfg.rephase[0] - ra0
        dec = cfg.rephase[1]
        l = np.sin(dra) * np.cos(dec)
        m = np.sin(dec) * np.cos(dec0) - np.cos(dra) * np.cos(dec) * np.sin(dec0)
        n = np.sin(dec) * np.sin(dec0) + np.cos(dra) * np.cos(dec) * np.cos(dec0)
        n -= 1 # makes the work below easier
        lmn = np.asarray([l, m, n])
        colnames.append('uvw')

    # Also need this although 99% of the time `ddid` and `spwid` are the same
    tb.open(b(os.path.join(cfg.vis, 'DATA_DESCRIPTION')))
    ddspws = np.asarray(tb.getcol(b'SPECTRAL_WINDOW_ID'))
    tb.close()

    tbins = {}
    colnames = b(colnames)

    for ddindex, ddid in enumerate(ddids):
        # Starting in CASA 4.6, selectinit(ddid) stopped actually filtering
        # your data to match the specified DDID! What garbage. Work around
        # with our own filtering.
        ms_sels['taql'] = 'DATA_DESC_ID == %d' % ddid
        ms.msselect(b(ms_sels))

        ms.selectinit(ddid)
        if cfg.polarization is not None:
            ms.selectpolarization(b(cfg.polarization.split(',')))
        ms.iterinit(maxrows=4096)
        ms.iterorigin()

        while True:
            cols = ms.getdata(items=colnames)

            if rephase:
                # With appropriate spw/DDID selection, `freqs` has shape
                # (nchan, 1). Convert to m^-1 so we can multiply against UVW
                # directly.
                freqs = cols['axis_info']['freq_axis']['chan_freq']
                assert freqs.shape[1] == 1, 'internal inconsistency, chan_freq??'
                freqs = freqs[:,0] * util.INVERSE_C_MS

            for i in range(cols['time'].size): # all records
                time = cols['time'][i]
                # get out of UTC as fast as we can! For some reason
                # giving 'unit=s' below doesn't do what one might hope it would.
                # CASA can convert to a variety of timescales; TAI is probably
                # the safest conversion in terms of being helpful while remaining
                # close to the fundamental data, but TT is possible and should
                # be perfectly precise for standard applications.
                mq = me.epoch(b'utc', b({'value': time / 86400., 'unit': 'd'}))
                mjdtt = me.measure(b(mq), b'tt')['m0']['value']

                tdata = tbins.get(mjdtt, None)
                if tdata is None:
                    # Accumulators: [sum(w*d), sum(w*d^2), sum(w), sum(w^2), n].
                    tdata = tbins[mjdtt] = [0., 0., 0., 0., 0]

                if rephase:
                    uvw = cols['uvw'][:,i]
                    ph = np.exp((0-2j) * np.pi * np.dot(lmn, uvw) * freqs)

                for j in range(cols['flag'].shape[0]): # all polns
                    # We just average together all polarizations right now!
                    # (Not actively, but passively by just iterating over them.)
                    data = cols[cfg.datacol][j,:,i]
                    flags = cols['flag'][j,:,i]

                    # XXXXX casacore is currently (ca. 2012) broken and
                    # returns the raw weights from the dataset rather than
                    # applying the polarization selection. Fortunately all of
                    # our weights are the same, and you can never fetch more
                    # pol types than the dataset has, so this bit works
                    # despite the bug.

                    w = np.where(~flags)[0]
                    if not w.size:
                        continue # all flagged

                    if rephase:
                        data *= ph

                    d = data[w].mean()
                    # account for flagged parts. 90% sure this is the
                    # right thing to do:
                    wt = cols['weight'][j,i] * w.size / data.size
                    wd = wt * d
                    # note a little bit of a hack here to encode real^2 and
                    # imag^2 separately:
                    wd2 = wt * (d.real**2 + (1j) * d.imag**2)

                    tdata[0] += wd
                    tdata[1] += wd2
                    tdata[2] += wt
                    tdata[3] += wt**2
                    tdata[4] += 1

            if not ms.iternext():
                break

        ms.reset() # reset selection filter so we can get next DDID

    ms.close()

    # Could gain some efficiency by using a better data structure than a dict().
    smjd = sorted(six.iterkeys(tbins))
    cfg.format.header(cfg)

    for mjd in smjd:
        wd, wd2, wt, wt2, n = tbins[mjd]
        if n < 3: # not enough data for meaningful statistics
            continue

        dtmin = 1440 * (mjd - smjd[0])
        r_sc = wd.real / wt * cfg.datascale
        i_sc = wd.imag / wt * cfg.datascale
        r2_sc = wd2.real / wt * cfg.datascale**2
        i2_sc = wd2.imag / wt * cfg.datascale**2

        if cfg.believeweights:
            ru_sc = wt**-0.5 * cfg.datascale
            iu_sc = wt**-0.5 * cfg.datascale
        else:
            rv_sc = r2_sc - r_sc**2 # variance among real/imag msmts
            iv_sc = i2_sc - i_sc**2
            ru_sc = np.sqrt(rv_sc * wt2) / wt # uncert in mean real/img values
            iu_sc = np.sqrt(iv_sc * wt2) / wt

        mag = np.sqrt(r_sc**2 + i_sc**2)
        umag = np.sqrt(r_sc**2 * ru_sc**2 + i_sc**2 * iu_sc**2) / mag
        cfg.format.row(cfg, mjd, dtmin, r_sc, ru_sc, i_sc, iu_sc, mag, umag, n)
See the module-level documentation and the output of ``casatask dftphotom
--help`` for help. All of the algorithm configuration is specified in the
*cfg* argument, which is an instance of :class:`Config`. |
def file_resolve(backend, filepath):
    """
    Mark a conflicted file as resolved, so that a merge can be completed.

    :param backend: CLI backend/context object (unused in this body but
        part of the command signature).
    :param filepath: iterable of file paths to mark as resolved.
    :raises click.ClickException: when not inside a recipe folder or when
        a given path does not exist.
    """
    recipe = DKRecipeDisk.find_recipe_name()
    if recipe is None:
        raise click.ClickException('You must be in a recipe folder.')
    click.secho("%s - Resolving conflicts" % get_datetime())
    for file_to_resolve in filepath:
        if not os.path.exists(file_to_resolve):
            raise click.ClickException('%s does not exist' % file_to_resolve)
        check_and_print(DKCloudCommandRunner.resolve_conflict(file_to_resolve))
def wrap_text(text, width):
    """Wrap each paragraph of *text* to *width* characters, keeping newlines."""
    wrapped = []
    for paragraph in text.splitlines():
        # wrap() returns an empty list for a blank paragraph; substitute a
        # single empty string so the newline survives.
        wrapped.extend(wrap(paragraph, width=width) or [''])
    return wrapped
newlines. |
def add_argument(self, *args, parser=None, autoenv=False, env=None,
                 complete=None, **kwargs):
    """Add an argparse action with optional env-var and completion hooks.

    With ``autoenv`` an environment variable name is derived from the
    command name and the action's ``dest`` and bound as a defaults setter;
    ``env`` and ``autoenv`` are mutually exclusive.
    """
    target = self.argparser if parser is None else parser
    action = target.add_argument(*args, **kwargs)
    if autoenv:
        if env is not None:
            raise TypeError('Arguments `env` and `autoenv` are mutually '
                            'exclusive')
        env = self._make_autoenv(action)
    if env:
        self.argparser.bind_env(action, env)
    if autoenv:
        # Recorded after binding so the env binding exists first.
        self._autoenv_actions.add(action)
    if complete:
        action.complete = complete
    return action
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action. |
def application_information(self, application_id):
    """Return overall information about a MapReduce application master.

    This includes application id, time it was started, user, name, etc.

    :returns: API response object with JSON data
    :rtype: :py:class:`yarn_api_client.base.Response`
    """
    endpoint = '/proxy/{appid}/ws/v1/mapreduce/info'.format(appid=application_id)
    return self.request(endpoint)
information about that mapreduce application master.
This includes application id, time it was started, user, name, etc.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` |
def svd_thresh(data, threshold=None, n_pc=None, thresh_type='hard'):
    r"""Threshold the singular values

    This method thresholds the input data using singular value decomposition

    Parameters
    ----------
    data : np.ndarray
        Input data array, 2D matrix
    threshold : float or np.ndarray, optional
        Threshold value(s)
    n_pc : int or str, optional
        Number of principal components, specify an integer value or 'all'
    threshold_type : str {'hard', 'soft'}, optional
        Type of thresholding (default is 'hard')

    Returns
    -------
    np.ndarray thresholded data

    Raises
    ------
    ValueError
        For invalid n_pc value

    Examples
    --------
    >>> from modopt.signal.svd import svd_thresh
    >>> x = np.arange(18).reshape(9, 2).astype(float)
    >>> svd_thresh(x, n_pc=1)
    array([[  0.49815487,   0.54291537],
           [  2.40863386,   2.62505584],
           [  4.31911286,   4.70719631],
           [  6.22959185,   6.78933678],
           [  8.14007085,   8.87147725],
           [ 10.05054985,  10.95361772],
           [ 11.96102884,  13.03575819],
           [ 13.87150784,  15.11789866],
           [ 15.78198684,  17.20003913]])
    """
    if ((not isinstance(n_pc, (int, str, type(None)))) or
            (isinstance(n_pc, int) and n_pc <= 0) or
            (isinstance(n_pc, str) and n_pc != 'all')):
        raise ValueError('Invalid value for "n_pc", specify a positive '
                         'integer value or "all"')
    # Get SVD of input data.
    u, s, v = calculate_svd(data)
    # Find the threshold if not provided.
    # (idiom: ``x is None`` instead of ``isinstance(x, type(None))``)
    if threshold is None:
        # Find the required number of principal components if not specified.
        if n_pc is None:
            n_pc = find_n_pc(u, factor=0.1)
        # If the number of PCs is too large use all of the singular values.
        if ((isinstance(n_pc, int) and n_pc >= s.size) or
                (isinstance(n_pc, str) and n_pc == 'all')):
            n_pc = s.size
            warn('Using all singular values.')
        threshold = s[n_pc - 1]
    # Threshold the singular values.
    s_new = thresh(s, threshold, thresh_type)
    if np.all(s_new == s):
        warn('No change to singular values.')
    # Diagonalize the svd.
    s_new = np.diag(s_new)
    # Return the thresholded data.
    return np.dot(u, np.dot(s_new, v))
This method thresholds the input data using singular value decomposition
Parameters
----------
data : np.ndarray
Input data array, 2D matrix
threshold : float or np.ndarray, optional
Threshold value(s)
n_pc : int or str, optional
Number of principal components, specify an integer value or 'all'
threshold_type : str {'hard', 'soft'}, optional
Type of thresholding (default is 'hard')
Returns
-------
np.ndarray thresholded data
Raises
------
ValueError
For invalid n_pc value
Examples
--------
>>> from modopt.signal.svd import svd_thresh
>>> x = np.arange(18).reshape(9, 2).astype(float)
>>> svd_thresh(x, n_pc=1)
array([[ 0.49815487, 0.54291537],
[ 2.40863386, 2.62505584],
[ 4.31911286, 4.70719631],
[ 6.22959185, 6.78933678],
[ 8.14007085, 8.87147725],
[ 10.05054985, 10.95361772],
[ 11.96102884, 13.03575819],
[ 13.87150784, 15.11789866],
[ 15.78198684, 17.20003913]]) |
def _set_switch_state(self, v, load=False):
    """
    Setter method for switch_state, mapped from YANG variable /brocade_system_monitor_ext_rpc/show_system_monitor/output/switch_status/switch_state (system-monitor-health-state-enum)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_switch_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_switch_state() directly.

    YANG Description: switch status based on components
    """
    # NOTE(review): appears to be pyangbind-generated code — do not hand-edit
    # the YANGDynClass construction below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'state-healthy': {}, u'state-unknown': {}, u'state-unmonitored': {}, u'state-down': {}, u'state-marginal': {}},), is_leaf=True, yang_name="switch-state", rest_name="switch-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='system-monitor-health-state-enum', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """switch_state must be of a type compatible with system-monitor-health-state-enum""",
            'defined-type': "brocade-system-monitor-ext:system-monitor-health-state-enum",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'state-healthy': {}, u'state-unknown': {}, u'state-unmonitored': {}, u'state-down': {}, u'state-marginal': {}},), is_leaf=True, yang_name="switch-state", rest_name="switch-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='system-monitor-health-state-enum', is_config=True)""",
        })
    self.__switch_state = t
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_switch_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_switch_state() directly.
YANG Description: switch status based on components |
def _schedule_pending_unlocked(self, state):
    """
    Consider the pending transfers for a stream, pumping new chunks while
    the unacknowledged byte count is below :attr:`window_size_bytes`. Must
    be called with the FileStreamState lock held.
    :param FileStreamState state:
        Stream to schedule chunks for.
    """
    # Pump the job at the head of the queue until the send window fills
    # or there is nothing left to send.
    while state.jobs and state.unacked < self.window_size_bytes:
        sender, fp = state.jobs[0]
        s = fp.read(self.IO_SIZE)
        if s:
            # Charge the chunk against the window before sending it.
            state.unacked += len(s)
            sender.send(mitogen.core.Blob(s))
        else:
            # File is done. Cause the target's receive loop to exit by
            # closing the sender, close the file, and remove the job entry.
            sender.close()
            fp.close()
            state.jobs.pop(0) | Consider the pending transfers for a stream, pumping new chunks while
the unacknowledged byte count is below :attr:`window_size_bytes`. Must
be called with the FileStreamState lock held.
:param FileStreamState state:
Stream to schedule chunks for. |
def diet(file, configuration, check):
    """Simple program that either print config customisations for your
    environment or compresses file FILE."""
    # NOTE(review): `check` is accepted but unused here — presumably consumed
    # by a CLI decorator outside this view; confirm before removing it.
    config = process.read_yaml_configuration(configuration)
    process.diet(file, config) | Simple program that either print config customisations for your
environment or compresses file FILE. |
def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):
    '''
    Locate every occurrence of a template image inside a source image using
    cv2.matchTemplate (pixel-based template matching).
    Args:
        im_source: source image to search in (BGR ndarray)
        im_search: template image to look for (BGR ndarray)
        threshold: matches scoring below this similarity value are ignored
        maxcnt: maximum number of matches to return (0 means unlimited)
        rgb: if True, match each BGR channel separately and combine scores
        bgremove: if True, apply Canny edge detection first to suppress background
    Returns:
        A list of dicts:
        [{'result': center_point, 'rectangle': corner_points, 'confidence': score}, ...]
    Raises:
        IOError: when file read error
    '''
    # method = cv2.TM_CCORR_NORMED
    # method = cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED
    if rgb:
        s_bgr = cv2.split(im_search) # Blue Green Red
        i_bgr = cv2.split(im_source)
        # Per-channel weights; green/blue weighted slightly lower than red.
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3): # bgr
            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    else:
        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)
        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
        # Edge extraction (implements the background-removal feature)
        if bgremove:
            s_gray = cv2.Canny(s_gray, 100, 200)
            i_gray = cv2.Canny(i_gray, 100, 200)
        res = cv2.matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]
    result = []
    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # SQDIFF methods score best match lowest; all others highest.
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        if DEBUG:
            print('templmatch_value(thresh:%.1f) = %.3f' %(threshold, max_val)) # not show debug
        if max_val < threshold:
            break
        # calculator middle point
        middle_point = (top_left[0]+w/2, top_left[1]+h/2)
        result.append(dict(
            result=middle_point,
            rectangle=(top_left, (top_left[0], top_left[1] + h), (top_left[0] + w, top_left[1]), (top_left[0] + w, top_left[1] + h)),
            confidence=max_val
        ))
        if maxcnt and len(result) >= maxcnt:
            break
        # floodfill the already found area
        cv2.floodFill(res, None, max_loc, (-1000,), max_val-threshold+0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
    return result | Locate image position with cv2.templateFind
Use pixel match to find pictures.
Args:
im_source(string): 图像、素材
im_search(string): 需要查找的图片
threshold: 阈值,当相识度小于该阈值的时候,就忽略掉
Returns:
A tuple of found [(point, score), ...]
Raises:
IOError: when file read error |
def get_internal_header(request: HttpRequest) -> str:
    """Fetch the 'X_POLYAXON_INTERNAL' header value of `request` as a bytestring."""
    header_service = conf.get('HEADERS_INTERNAL')
    return get_header(request=request, header_service=header_service)
def set_empty_symbol(self):
    """Reset this context to represent an empty quoted symbol.
    The fields tying the context to its container (``container``, ``queue``,
    ``depth``, ``whence``) are preserved; the value-related fields are
    cleared and ``pending_symbol`` becomes an empty ``CodePointArray``.
    Useful when an empty quoted symbol immediately follows a long string.
    """
    for attribute in ('field_name', 'annotations', 'ion_type'):
        setattr(self, attribute, None)
    self.set_pending_symbol(CodePointArray())
    return self
return self | Resets the context, retaining the fields that make it a child of its container (``container``, ``queue``,
``depth``, ``whence``), and sets an empty ``pending_symbol``.
This is useful when an empty quoted symbol immediately follows a long string. |
def run(graph, save_on_github=False, main_entity=None):
    """
    Render the sigma.js HTML visualization for `graph`.
    Builds a JSON description of the graph's classes, fills in the
    sigmajs.html template and returns the rendered markup.
    :param graph: ontospy-style graph (classes, properties, sources, ontologies)
    :param save_on_github: accepted for interface compatibility; unused here
    :param main_entity: accepted for interface compatibility; unused here
    :return: rendered HTML as a safe string
    2016-11-30
    """
    try:
        ontology = graph.all_ontologies[0]
        uri = ontology.uri
    except Exception:
        # Graph declares no ontology: fall back to joining its source URIs.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        ontology = None
        uri = ";".join([s for s in graph.sources])
    # Use a context manager so the template file handle is not leaked.
    with open(ONTODOCS_VIZ_TEMPLATES + "sigmajs.html", "r") as ontotemplate:
        t = Template(ontotemplate.read())
    dict_graph = build_class_json(graph.classes)
    JSON_DATA_CLASSES = json.dumps(dict_graph)
    c = Context({
        "ontology": ontology,
        "main_uri": uri,
        "STATIC_PATH": ONTODOCS_VIZ_STATIC,
        "classes": graph.classes,
        "classes_TOPLAYER": len(graph.toplayer_classes),
        "properties": graph.all_properties,
        "properties_TOPLAYER": len(graph.toplayer_properties),
        "skosConcepts": graph.all_skos_concepts,
        "skosConcepts_TOPLAYER": len(graph.toplayer_skos),
        'JSON_DATA_CLASSES': JSON_DATA_CLASSES,
    })
    rnd = t.render(c)
    return safe_str(rnd)
def user_getfield(self, field, access_token=None):
    """
    Request a single field of information about the user.
    :param field: The name of the field requested.
    :type field: str
    :returns: The value of the field. Depending on the type, this may be
              a string, list, dict, or something else.
    :rtype: object
    .. versionadded:: 1.0
    """
    # Delegate to the multi-field lookup and pull out the single entry.
    return self.user_getinfo([field], access_token).get(field)
:param field: The name of the field requested.
:type field: str
:returns: The value of the field. Depending on the type, this may be
a string, list, dict, or something else.
:rtype: object
.. versionadded:: 1.0 |
def threader(group=None, name=None, daemon=True):
    """Decorator that runs the wrapped function in a new thread.
    The decorated function, when called, starts a thread executing the
    original function and immediately returns the ``Thread`` object. The
    function's return value is placed on a ``Queue`` attached to the thread
    as ``thread.queue``.
    :param group: reserved for future extension when a ThreadGroup class is implemented
    :param name: thread name
    :param daemon: thread behavior
    :rtype: decorator
    """
    from queue import Queue  # local import: only needed by this decorator

    def decorator(job):
        """
        :param job: function to be threaded
        :rtype: wrap
        """
        def wrapped_job(queue, *args, **kwargs):
            """Call the decorated function and put the result in `queue`."""
            queue.put(job(*args, **kwargs))

        def wrap(*args, **kwargs):
            """Fire off `job` in a new thread and return the Thread object
            with the result queue attached as ``thread.queue``.
            :rtype: Thread
            """
            # BUG FIX: the original never created a result queue, so the
            # caller's first positional argument was silently consumed as
            # `queue` and `queue.put(ret)` then failed.
            queue = Queue()
            thread = Thread(group=group, target=wrapped_job, name=name,
                            args=(queue,) + args, kwargs=kwargs, daemon=daemon)
            thread.queue = queue
            thread.start()
            return thread
        return wrap
    return decorator
:param group: reserved for future extension when a ThreadGroup class is implemented
:param name: thread name
:param daemon: thread behavior
:rtype: decorator |
def get(self, *keys, fallback=None):
    """Retrieve a (possibly nested) value from the config.
    The first key selects a section; any further keys walk down nested
    dicts. `fallback` is returned whenever a lookup along the path fails.
    :param keys: section name followed by zero or more nested keys
    :param fallback: value returned when the path does not resolve
    """
    section, *keys = keys
    out = super().get(section, fallback)
    # BUG FIX: stop when the key path is exhausted instead of raising
    # IndexError (keys.pop on an empty list) when the resolved value is
    # itself a dict.
    while keys and isinstance(out, dict):
        key = keys.pop(0)
        out = out.get(key, fallback)
    return out
give the fallback value specified. |
def g_square_bin(dm, x, y, s):
    """G square test for a binary data.
    Args:
        dm: the data matrix to be used (as a numpy.ndarray).
        x: the first node (as an integer).
        y: the second node (as an integer).
        s: the set of neibouring nodes of x and y (as a set()).
    Returns:
        p_val: the p-value of conditional independence.
    """
    def _calculate_tlog(x, y, s, dof, dm):
        # Contingency table n[i,j,k] over x, y and the joint binary state k of s.
        nijk = np.zeros((2, 2, dof))
        s_size = len(s)
        z = []
        for z_index in range(s_size):
            # NOTE(review): s.pop() empties the caller's set — apparently
            # tolerated because s is consumed only within this single call.
            z.append(s.pop())
            pass
        for row_index in range(0, dm.shape[0]):
            i = dm[row_index, x]
            j = dm[row_index, y]
            k = []
            k_index = 0
            for z_index in range(s_size):
                # Encode the binary states of s as a single integer index.
                k_index += dm[row_index, z[z_index]] * int(pow(2, z_index))
                pass
            nijk[i, j, k_index] += 1
            pass
        # Marginal counts over y (nik) and over x (njk) for each k.
        nik = np.ndarray((2, dof))
        njk = np.ndarray((2, dof))
        for k_index in range(dof):
            nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
            njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
            pass
        nk = njk.sum(axis = 0)
        tlog = np.zeros((2, 2 , dof))
        tlog.fill(np.nan)
        for k in range(dof):
            tx = np.array([nik[:,k]]).T
            ty = np.array([njk[:,k]])
            tdijk = tx.dot(ty)
            # Ratio of observed to expected counts under independence.
            tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk
            pass
        return (nijk, tlog)
    _logger.debug('Edge %d -- %d with subset: %s' % (x, y, s))
    row_size = dm.shape[0]
    s_size = len(s)
    dof = int(pow(2, s_size))
    # Heuristic sample-size requirement: 10 observations per configuration.
    row_size_required = 10 * dof
    if row_size < row_size_required:
        _logger.warning('Not enough samples. %s is too small. Need %s.'
                        % (str(row_size), str(row_size_required)))
        return 1
    nijk = None
    if s_size < 6:
        if s_size == 0:
            # Unconditional test: plain 2x2 table.
            nijk = np.zeros((2, 2))
            for row_index in range(0, dm.shape[0]):
                i = dm[row_index, x]
                j = dm[row_index, y]
                nijk[i, j] += 1
                pass
            tx = np.array([nijk.sum(axis = 1)]).T
            ty = np.array([nijk.sum(axis = 0)])
            tdij = tx.dot(ty)
            tlog = nijk * row_size / tdij
            pass
        if s_size > 0:
            nijk, tlog = _calculate_tlog(x, y, s, dof, dm)
            pass
        pass
    else:
        # s_size >= 6: enumerate only the parent configurations actually
        # observed in the data instead of all 2^s_size of them.
        nijk = np.zeros((2, 2, 1))
        i = dm[0, x]
        j = dm[0, y]
        k = []
        for z in s:
            k.append(dm[:,z])
            pass
        k = np.array(k).T
        parents_count = 1
        parents_val = np.array([k[0,:]])
        nijk[i, j, parents_count - 1] = 1
        for it_sample in range(1, row_size):
            is_new = True
            i = dm[it_sample, x]
            j = dm[it_sample, y]
            tcomp = parents_val[:parents_count,:] == k[it_sample,:]
            for it_parents in range(parents_count):
                if np.all(tcomp[it_parents,:]):
                    nijk[i, j, it_parents] += 1
                    is_new = False
                    break
                pass
            if is_new is True:
                # First time this parent configuration is seen: grow the table.
                parents_count += 1
                parents_val = np.r_[parents_val, [k[it_sample,:]]]
                nnijk = np.zeros((2,2,parents_count))
                for p in range(parents_count - 1):
                    nnijk[:,:,p] = nijk[:,:,p]
                nnijk[i, j, parents_count - 1] = 1
                nijk = nnijk
                pass
            pass
        nik = np.ndarray((2, parents_count))
        njk = np.ndarray((2, parents_count))
        for k_index in range(parents_count):
            nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
            njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
            pass
        nk = njk.sum(axis = 0)
        tlog = np.zeros((2, 2 , parents_count))
        tlog.fill(np.nan)
        for k in range(parents_count):
            tX = np.array([nik[:,k]]).T
            tY = np.array([njk[:,k]])
            tdijk = tX.dot(tY)
            tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk
            pass
        pass
    log_tlog = np.log(tlog)
    # G statistic: 2 * sum(observed * log(observed / expected)); nansum skips
    # the NaN cells left for unobserved configurations.
    G2 = np.nansum(2 * nijk * log_tlog)
    # _logger.debug('dof = %d' % dof)
    # _logger.debug('nijk = %s' % nijk)
    # _logger.debug('tlog = %s' % tlog)
    # _logger.debug('log(tlog) = %s' % log_tlog)
    _logger.debug('G2 = %f' % G2)
    # NOTE(review): dof stays 2^|s| even when the >=6 branch tabulates fewer
    # observed parent configurations — confirm this is intended.
    p_val = chi2.sf(G2, dof)
    _logger.info('p_val = %s' % str(p_val))
    return p_val | G square test for a binary data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
s: the set of neibouring nodes of x and y (as a set()).
Returns:
p_val: the p-value of conditional independence. |
def to_numpy(self, dtype=None, copy=False):
    """Convert the DataFrame to a NumPy array.
    Args:
        dtype: The dtype to pass to numpy.asarray()
        copy: Whether to ensure that the returned value is a not a view on
            another array.
    Returns:
        A numpy array.
    """
    # No native implementation yet: defer to the pandas fallback path.
    kwargs = {"dtype": dtype, "copy": copy}
    return self._default_to_pandas("to_numpy", **kwargs)
Args:
dtype: The dtype to pass to numpy.asarray()
copy: Whether to ensure that the returned value is a not a view on another
array.
Returns:
A numpy array. |
def _init_record(self, record_type_idstr):
    """Instantiate and register the form record for `record_type_idstr`.
    Overrides osid.Extensible because Forms look the record class up under
    the ``form_record_class_name`` key of record_type_data.
    Returns True when the record class was found and registered, else False.
    """
    identifier = Id(record_type_idstr).get_identifier()
    data = self._record_type_data_sets[identifier]
    module = importlib.import_module(data['module_path'])
    record_cls = getattr(module, data['form_record_class_name'])
    if record_cls is None:
        return False
    self._records[record_type_idstr] = record_cls(self)
    return True
attribute in record_type_data. |
def put(self, destination):
    """Copy the referenced directory to this path (unix ``cp`` semantics).
    If ``destination`` already exists the copy lands at
    ``destination // basename(localpath)``; otherwise the copied tree takes
    the name ``destination`` (whose parent directory must exist).
    Args:
        destination (str): path to put this directory
    """
    shutil.copytree(self.localpath, get_target_path(destination, self.localpath))
shutil.copytree(self.localpath, target) | Copy the referenced directory to this path
The semantics of this command are similar to unix ``cp``: if ``destination`` already
exists, the copied directory will be put at ``[destination] // [basename(localpath)]``. If
it does not already exist, the directory will be renamed to this path (the parent directory
must exist).
Args:
destination (str): path to put this directory |
def __learn_oneself(self):
    """calculate cardinality, total and average string length"""
    if not self.__parent_path or not self.__text_nodes:
        raise Exception("This error occurred because the step constructor\
                had insufficient textnodes or it had empty string\
                for its parent xpath")
    # Iterate through text nodes and sum up text length
    # TODO: consider naming this child_count or cardinality
    # or branch_cnt
    self.tnodes_cnt = len(self.__text_nodes)
    # consider naming this total
    self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes])
    # consider naming this average
    # NOTE(review): true division — avg_strlen is a float under Python 3.
    self.avg_strlen = self.ttl_strlen/self.tnodes_cnt | calculate cardinality, total and average string length
def __pop_frames_above(self, frame):
    """Pops all the frames above, but not including, the given frame.
    Comparison is by identity, so `frame` must be the exact object
    currently on the stack.
    """
    while self.__stack[-1] is not frame:
        self.__pop_top_frame()
    # `frame` itself stays, so the stack can never be emptied here.
    assert self.__stack | Pops all the frames above, but not including the given frame.
def netconf_state_datastores_datastore_locks_lock_type_global_lock_global_lock_locked_time(self, **kwargs):
    """Auto Generated Code
    Build a NETCONF <config> tree for
    /netconf-state/datastores/datastore/locks/lock-type/global-lock/locked-time
    and hand it to the callback.
    Required kwargs: 'name' (datastore list key), 'locked_time'.
    Optional kwargs: 'callback' (defaults to self._callback).
    """
    config = ET.Element("config")
    netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
    datastores = ET.SubElement(netconf_state, "datastores")
    datastore = ET.SubElement(datastores, "datastore")
    name_key = ET.SubElement(datastore, "name")
    name_key.text = kwargs.pop('name')
    locks = ET.SubElement(datastore, "locks")
    lock_type = ET.SubElement(locks, "lock-type")
    global_lock = ET.SubElement(lock_type, "global-lock")
    # NOTE(review): nested <global-lock> under <global-lock> mirrors the
    # generated YANG path; confirm against the model before changing.
    global_lock = ET.SubElement(global_lock, "global-lock")
    locked_time = ET.SubElement(global_lock, "locked-time")
    locked_time.text = kwargs.pop('locked_time')
    callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code
def library_line(self, file_name):
    """
    Specifies GULP library file to read species and potential parameters.
    If using library don't specify species and potential
    in the input file and vice versa. Make sure the elements of
    structure are in the library file.
    Args:
        file_name: Name of GULP library file
    Returns:
        GULP input string specifying library option
    Raises:
        GulpError: if the library file cannot be found or read
    """
    # PEP 8 (E731): plain helper instead of a lambda bound to a name.
    def readable(f):
        """True when `f` is an existing, readable file."""
        return os.path.isfile(f) and os.access(f, os.R_OK)

    gin = ""
    dirpath, fname = os.path.split(file_name)
    if dirpath and readable(file_name):  # Full path specified
        gin = 'library ' + file_name
    else:
        fpath = os.path.join(os.getcwd(), file_name)  # Check current dir
        if readable(fpath):
            gin = 'library ' + fpath
        elif 'GULP_LIB' in os.environ:  # Check the GULP_LIB path
            fpath = os.path.join(os.environ['GULP_LIB'], file_name)
            if readable(fpath):
                gin = 'library ' + file_name
    if gin:
        return gin + "\n"
    else:
        raise GulpError('GULP Library not found')
If using library don't specify species and potential
in the input file and vice versa. Make sure the elements of
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option |
def sortTitles(self, by):
    """
    Sort titles by a given attribute and then by title.
    @param by: A C{str}, one of 'length', 'maxScore', 'medianScore',
        'readCount', or 'title'.
    @raise ValueError: If an unknown C{by} value is given.
    @return: A sorted C{list} of titles.
    """
    # The secondary sort key is always the title itself: sort by it first,
    # then rely on sort stability for the (descending) primary key.
    titles = sorted(iter(self))
    if by == 'title':
        return titles
    primaryKeys = {
        'length': lambda title: self[title].subjectLength,
        'maxScore': lambda title: self[title].bestHsp(),
        'medianScore': lambda title: self.scoreClass(
            self[title].medianScore()),
        'readCount': lambda title: self[title].readCount(),
    }
    try:
        keyFunc = primaryKeys[by]
    except KeyError:
        raise ValueError('Sort attribute must be one of "length", "maxScore", '
                         '"medianScore", "readCount", "title".')
    return sorted(titles, reverse=True, key=keyFunc)
@param by: A C{str}, one of 'length', 'maxScore', 'medianScore',
'readCount', or 'title'.
@raise ValueError: If an unknown C{by} value is given.
@return: A sorted C{list} of titles. |
def GetValueByPath(self, path_segments):
    """Retrieves a plist value by path.
    Args:
        path_segments (list[str]): path segment strings relative to the root
            of the plist.
    Returns:
        object: The value of the key specified by the path or None.
    """
    current = self.root_key
    for segment in path_segments:
        if isinstance(current, dict):
            try:
                current = current[segment]
            except KeyError:
                return None
        elif isinstance(current, list):
            try:
                index = int(segment, 10)
            except ValueError:
                return None
            current = current[index]
        else:
            # Hit a scalar before the path was exhausted.
            return None
    # Falsy leaf values (0, '', empty containers) are reported as None,
    # matching the original behavior.
    return current if current else None
Args:
path_segments (list[str]): path segment strings relative to the root
of the plist.
Returns:
object: The value of the key specified by the path or None. |
def shift(self,
          periods: int,
          axis: libinternals.BlockPlacement = 0,
          fill_value: Any = None) -> List['ExtensionBlock']:
    """
    Shift the block by `periods`.
    Dispatches to underlying ExtensionArray and re-boxes in an
    ExtensionBlock.
    """
    # NOTE(review): the `axis` annotation looks wrong (default is the int 0,
    # not a BlockPlacement) and the argument is never used here — confirm.
    return [
        self.make_block_same_class(
            self.values.shift(periods=periods, fill_value=fill_value),
            placement=self.mgr_locs, ndim=self.ndim)
    ] | Shift the block by `periods`.
Dispatches to underlying ExtensionArray and re-boxes in an
ExtensionBlock. |
def get_config():
    """Create, populate and return the VersioneerConfig() object.
    These values are filled in when 'setup.py versioneer' creates
    _version.py.
    """
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440-post",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "canmatrix/_version.py",
        "verbose": False,
    }
    for attribute, value in settings.items():
        setattr(cfg, attribute, value)
    return cfg
def get_Note(self, string=0, fret=0, maxfret=24):
    """Return the Note on 'string', 'fret'.
    Throw a RangeError if either the fret or string is unplayable.
    Examples:
    >>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
    >>> t.get_Note(0, 0)
    'A-3'
    >>> t.get_Note(0, 1)
    'A#-3'
    >>> t.get_Note(1, 0)
    'A-4'
    """
    # Guard clauses replace the original nested conditionals.
    if not 0 <= string < self.count_strings():
        raise RangeError("String '%d' out of range" % string)
    if not 0 <= fret <= maxfret:
        # BUG FIX: the original interpolated (string, fret) into a format
        # string whose placeholders are (fret, string) — values swapped.
        raise RangeError("Fret '%d' on string '%d' is out of range"
                         % (fret, string))
    s = self.tuning[string]
    if isinstance(s, list):
        # A course (doubled string) is stored as a list; use its first string.
        s = s[0]
    n = Note(int(s) + fret)
    n.string = string
    n.fret = fret
    return n
Throw a RangeError if either the fret or string is unplayable.
Examples:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
>>> t,get_Note(0, 0)
'A-3'
>>> t.get_Note(0, 1)
'A#-3'
>>> t.get_Note(1, 0)
'A-4' |
def filter_pyfqn(cls, value, relative_to=0):
    """
    Returns Python form of fully qualified name.
    Args:
        relative_to: If greater 0, the returned path is relative to the first n directories.
    """
    # Walk containers from `value` up to the root, collecting package names;
    # reversing yields root-first order (iterative form of the original
    # recursion).
    chain = []
    node = value
    while True:
        chain.append(node.name)
        node = node.eContainer()
        if not node:
            break
    chain.reverse()
    if not 0 <= relative_to <= len(chain):
        raise ValueError('relative_to not in range of number of packages')
    fqn = '.'.join(chain[relative_to:])
    if relative_to:
        fqn = '.' + fqn
    return cls.module_path_map.get(fqn, fqn)
Args:
relative_to: If greater 0, the returned path is relative to the first n directories. |
def _interval(dates):
"""Return the distance between all dates and 0 if they are different"""
interval = (dates[1] - dates[0]).days
last = dates[0]
for dat in dates[1:]:
if (dat - last).days != interval:
return 0
last = dat
return interval | Return the distance between all dates and 0 if they are different |
def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path):
    """
    Creates a pymatgen CompletePhononDos from a partial_dos.dat and
    phonopy.yaml files.
    The second is produced when generating a Dos and is needed to extract
    the structure.
    Args:
        partial_dos_path: path to the partial_dos.dat file.
        phonopy_yaml_path: path to the phonopy.yaml file.
    Returns:
        CompletePhononDos with total and per-site projected DOS.
    """
    # Transposed columns: a[0] = frequencies, a[1:] = one DOS row per site.
    a = np.loadtxt(partial_dos_path).transpose()
    d = loadfn(phonopy_yaml_path)
    structure = get_structure_from_dict(d['primitive_cell'])
    # Total DOS is the sum of the site-projected densities.
    total_dos = PhononDos(a[0], a[1:].sum(axis=0))
    pdoss = {}
    for site, pdos in zip(structure, a[1:]):
        pdoss[site] = pdos.tolist()
    return CompletePhononDos(structure, total_dos, pdoss) | Creates a pymatgen CompletePhononDos from a partial_dos.dat and
phonopy.yaml files.
The second is produced when generating a Dos and is needed to extract
the structure.
Args:
partial_dos_path: path to the partial_dos.dat file.
phonopy_yaml_path: path to the phonopy.yaml file. |
def boolean(flag):
    """
    Convert string in boolean.
    Accepts, case-insensitively, '1'/'yes'/'true' and '0'/'no'/'false';
    raises ValueError for anything else.
    """
    mapping = {
        '1': True, 'yes': True, 'true': True,
        '0': False, 'no': False, 'false': False,
    }
    s = flag.lower()
    if s in mapping:
        return mapping[s]
    raise ValueError('Unknown flag %r' % s)
def CreateUser(self, database_link, user, options=None):
    """Creates a user.
    :param str database_link:
        The link to the database.
    :param dict user:
        The Azure Cosmos user to create.
    :param dict options:
        The request options for the request.
    :return:
        The created User.
    :rtype:
        dict
    """
    options = {} if options is None else options
    database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
    return self.Create(user, path, 'users', database_id, None, options)
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to create.
:param dict options:
The request options for the request.
:return:
The created User.
:rtype:
dict |
def __updateStack(self, key):
    """
    Update the input stack in non-hotkey mode, and determine if anything
    further is needed.
    @return: True if further action is needed
    """
    #if self.lastMenu is not None:
    #    if not ConfigManager.SETTINGS[MENU_TAKES_FOCUS]:
    #        self.app.hide_menu()
    #
    #    self.lastMenu = None
    if key == Key.ENTER:
        # Special case - map Enter to \n
        key = '\n'
    if key == Key.TAB:
        # Special case - map Tab to \t
        key = '\t'
    if key == Key.BACKSPACE:
        # Prefer undoing the last phrase expansion over plain deletion.
        if ConfigManager.SETTINGS[UNDO_USING_BACKSPACE] and self.phraseRunner.can_undo():
            self.phraseRunner.undo_expansion()
        else:
            # handle backspace by dropping the last saved character
            try:
                self.inputStack.pop()
            except IndexError:
                # in case self.inputStack is empty
                pass
        return False
    elif len(key) > 1:
        # non-simple key (modifier/navigation): the typed prefix is stale.
        self.inputStack.clear()
        self.phraseRunner.clear_last()
        return False
    else:
        # Key is a character
        self.phraseRunner.clear_last()
        # if len(self.inputStack) == MAX_STACK_LENGTH, front items will removed for appending new items.
        # NOTE(review): presumably inputStack is a bounded deque — confirm.
        self.inputStack.append(key)
        return True | Update the input stack in non-hotkey mode, and determine if anything
further is needed.
@return: True if further action is needed |
def _parse_message(self, data):
    """
    Parses the raw message from the device.
    :param data: message data to parse
    :type data: string
    :raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
    """
    try:
        # A message is '<header>:<csv values>'; more or fewer than one ':'
        # raises ValueError and is reported as an invalid message below.
        _, values = data.split(':')
        values = values.split(',')
        # Handle older-format events
        if len(values) <= 3:
            self.event_data, self.partition, self.event_type = values
            self.version = 1
        # Newer-format events
        else:
            self.event_data, self.partition, self.event_type, self.report_code = values
            self.version = 2
        # event_type looks like '<PREFIX>_<status digit><hex code>'.
        event_type_data = self.event_type.split('_')
        self.event_prefix = event_type_data[0]                          # Ex: CID
        self.event_source = get_event_source(self.event_prefix)         # Ex: LRR_EVENT_TYPE.CID
        self.event_status = int(event_type_data[1][0])                  # Ex: 1 or 3
        self.event_code = int(event_type_data[1][1:], 16)               # Ex: 0x100 = Medical
        # replace last 2 digits of event_code with report_code, if applicable.
        if not self.skip_report_override and self.report_code not in ['00', 'ff']:
            self.event_code = int(event_type_data[1][1] + self.report_code, 16)
        self.event_description = get_event_description(self.event_source, self.event_code)
    except ValueError:
        raise InvalidMessageError('Received invalid message: {0}'.format(data)) | Parses the raw message from the device.
:param data: message data to parse
:type data: string
:raises: :py:class:`~alarmdecoder.util.InvalidMessageError` |
def get_subset(self, subset_ids):
    """
    Returns a smaller dataset identified by their keys/sample IDs.
    Parameters
    ----------
    subset_ids : list
        List of sample IDs to be extracted from the dataset.
    Returns
    -------
    sub-dataset : MLDataset
        sub-dataset containing only requested sample IDs; an empty dataset
        (with a warning) when none of the requested IDs exist.
    """
    # BUG FIX: check for None *before* iterating subset_ids — the original
    # crashed with TypeError when subset_ids was None.
    if subset_ids is not None:
        num_existing_keys = sum(1 for key in subset_ids if key in self.__data)
    else:
        num_existing_keys = 0
    if num_existing_keys > 0:
        # ensure items are added to data, labels etc in the same order of sample IDs
        # TODO come up with a way to do this even when not using OrderedDict()
        # putting the access of data, labels and classes in the same loop would
        # ensure there is correspondence across the three attributes of the class
        data = self.__get_subset_from_dict(self.__data, subset_ids)
        labels = self.__get_subset_from_dict(self.__labels, subset_ids)
        if self.__classes is not None:
            classes = self.__get_subset_from_dict(self.__classes, subset_ids)
        else:
            classes = None
        subdataset = MLDataset(data=data, labels=labels, classes=classes)
        # Appending the history
        subdataset.description += '\n Subset derived from: ' + self.description
        subdataset.feature_names = self.__feature_names
        subdataset.__dtype = self.dtype
        return subdataset
    else:
        warnings.warn('subset of IDs requested do not exist in the dataset!')
        return MLDataset()
Parameters
----------
subset_ids : list
List od sample IDs to extracted from the dataset.
Returns
-------
sub-dataset : MLDataset
sub-dataset containing only requested sample IDs. |
def _expectation(p, kern1, feat1, kern2, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
    - Ka_{.,.}, Kb_{.,.} :: RBF kernels
    Ka and Kb as well as Z1 and Z2 can differ from each other.
    :return: N x dim(Z1) x dim(Z2)
    """
    if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian):
        # no joint expectations required
        eKxz1 = expectation(p, (kern1, feat1))
        eKxz2 = expectation(p, (kern2, feat2))
        return eKxz1[:, :, None] * eKxz2[:, None, :]
    Ka, Kb = kern1, kern2
    with params_as_tensors_for(Ka, feat1, Kb, feat2):
        # use only active dimensions
        Xcov = Ka._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
        Z1, Xmu = Ka._slice(feat1.Z, p.mu)
        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]
        def get_squared_length_scales(kern):
            # Broadcast a scalar lengthscale across all D dims when not ARD.
            squared_lengthscales = kern.lengthscales ** 2. if kern.ARD \
                else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales ** 2.
            return squared_lengthscales
        if Ka == Kb:
            La = get_squared_length_scales(Ka)
            Lb = La
            half_mean_L = La * 0.5  # average length scale
        else:
            La, Lb = map(get_squared_length_scales, (Ka, Kb))
            half_mean_L = La * Lb / (La + Lb)  # average length scale
        sqrt_det_L = tf.reduce_prod(half_mean_L) ** 0.5
        C = tf.cholesky(tf.matrix_diag(half_mean_L) + Xcov)  # [N, D, D]
        dets = sqrt_det_L / tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(C)), axis=1))  # N
        # for mahalanobis computation we need Zᵀ (CCᵀ)⁻¹ Z as well as C⁻¹ Z
        # with Z = Z₁, Z₂ for two rbf kernels
        def get_cholesky_solve_terms(Z, C=C):
            C_inv_z = tf.matrix_triangular_solve(
                C, tf.tile(tf.expand_dims(tf.transpose(Z), 0),
                           [N, 1, 1]), lower=True)  # [N, D, M]
            z_CC_inv_z = tf.reduce_sum(tf.square(C_inv_z), 1)  # [N, M]
            return C_inv_z, z_CC_inv_z
        C_inv_mu = tf.matrix_triangular_solve(C, tf.expand_dims(Xmu, 2), lower=True)  # [N, D, 1]
        mu_CC_inv_mu = tf.expand_dims(tf.reduce_sum(tf.square(C_inv_mu), 1), 2)  # [N, 1, 1]
        C_inv_z1, z1_CC_inv_z1 = get_cholesky_solve_terms(Z1 / La * half_mean_L)
        z1_CC_inv_mu = 2 * tf.matmul(C_inv_z1, C_inv_mu, transpose_a=True)[:, :, 0]  # [N, M1]
        if feat1 == feat2 and Ka == Kb:
            # in this case Z2==Z1 so we can reuse the Z1 terms
            C_inv_z2, z2_CC_inv_z2 = C_inv_z1, z1_CC_inv_z1
            z2_CC_inv_mu = z1_CC_inv_mu  # [N, M]
            Z2 = Z1
        else:
            # compute terms related to Z2
            Z2, _ = Kb._slice(feat2.Z, p.mu)
            C_inv_z2, z2_CC_inv_z2 = get_cholesky_solve_terms(Z2 / Lb * half_mean_L)
            z2_CC_inv_mu = 2 * tf.matmul(C_inv_z2, C_inv_mu, transpose_a=True)[:, :, 0]  # [N, M2]
        z1_CC_inv_z2 = tf.matmul(C_inv_z1, C_inv_z2, transpose_a=True)  # [N, M1, M2]
        # expand dims for broadcasting
        # along M1
        z2_CC_inv_mu = tf.expand_dims(z2_CC_inv_mu, 1)  # [N, 1, M2]
        z2_CC_inv_z2 = tf.expand_dims(z2_CC_inv_z2, 1)
        # along M2
        z1_CC_inv_mu = tf.expand_dims(z1_CC_inv_mu, 2)  # [N, M1, 1]
        z1_CC_inv_z1 = tf.expand_dims(z1_CC_inv_z1, 2)
        # expanded version of ((Z1 + Z2)-mu) (CCT)-1 ((Z1 + Z2)-mu)
        mahalanobis = mu_CC_inv_mu + z2_CC_inv_z2 + \
                      z1_CC_inv_z1 + 2 * z1_CC_inv_z2 - \
                      z1_CC_inv_mu - z2_CC_inv_mu  # [N, M1, M2]
        exp_mahalanobis = tf.exp(-0.5 * mahalanobis)  # [N, M1, M2]
        if Z1 == Z2:
            # CAVEAT : Compute sqrt(self.K(Z)) explicitly
            # to prevent automatic gradient from
            # being NaN sometimes, see pull request #615
            sqrt_exp_dist = tf.exp(-0.25 * Ka.scaled_square_dist(Z1, None))
        else:
            # Compute exp( -.5 (Z-Z')^top (L_1+L_2)^{-1} (Z-Z') )
            lengthscales_rms = tf.sqrt(La + Lb)
            Z1 = Z1 / lengthscales_rms
            Z1sqr = tf.reduce_sum(tf.square(Z1), axis=1)
            Z2 = Z2 / lengthscales_rms
            Z2sqr = tf.reduce_sum(tf.square(Z2), axis=1)
            dist = -2 * tf.matmul(Z1, Z2, transpose_b=True) \
                   + tf.reshape(Z1sqr, (-1, 1)) + tf.reshape(Z2sqr, (1, -1))
            sqrt_exp_dist = tf.exp(-0.5 * dist)  # M1 x M2
        return Ka.variance * Kb.variance * sqrt_exp_dist * \
               tf.reshape(dets, [N, 1, 1]) * exp_mahalanobis | Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
- Ka_{.,.}, Kb_{.,.} :: RBF kernels
Ka and Kb as well as Z1 and Z2 can differ from each other.
:return: N x dim(Z1) x dim(Z2) |
def stopPoll(self, msg_identifier,
             reply_markup=None):
    """
    Stop a poll and return the final result.

    See: https://core.telegram.org/bots/api#stoppoll

    :param msg_identifier:
        a 2-tuple (``chat_id``, ``message_id``),
        a 1-tuple (``inline_message_id``),
        or simply ``inline_message_id``.
        You may extract this value easily with :meth:`amanobot.message_identifier`
    :param reply_markup:
        optional new inline keyboard for the stopped poll.
    """
    # _strip(locals()) builds the request payload from the visible
    # parameters, so local/parameter names must match Bot API field names.
    # msg_identifier is excluded and expanded separately below.
    p = _strip(locals(), more=['msg_identifier'])
    # Expand msg_identifier into chat_id/message_id (or inline_message_id).
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('stopPoll', _rectify(p))
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`amanobot.message_identifier` |
def _retry_task(provider, job_descriptor, task_id, task_attempt):
    """Retry task_id (numeric id) assigning it task_attempt."""
    # Clone the original task descriptor, tagging it with the new attempt.
    previous = job_descriptor.find_task_descriptor(task_id)
    retried = job_model.TaskDescriptor(
        {'task-id': task_id, 'task-attempt': task_attempt},
        previous.task_params,
        previous.task_resources,
    )
    new_task_descriptors = [retried]

    # Re-resolve resources so the retried attempt gets its own logging path.
    _resolve_task_resources(job_descriptor.job_metadata,
                            job_descriptor.job_resources, new_task_descriptors)

    retried_job = job_model.JobDescriptor(
        job_descriptor.job_metadata, job_descriptor.job_params,
        job_descriptor.job_resources, new_task_descriptors)
    provider.submit_job(retried_job, False)
def propose_template(self, template_id, from_account):
    """Propose a template.

    :param template_id: id of the template, str
    :param from_account: Account
    :return: bool -- True when the transaction receipt reports success
    """
    transact_params = {
        'from': from_account.address,
        'passphrase': from_account.password,
    }
    tx_hash = self.send_transaction(
        'proposeTemplate', (template_id,), transact=transact_params)
    receipt = self.get_tx_receipt(tx_hash)
    # A receipt status of 1 means the transaction succeeded on-chain.
    return receipt.status == 1
:param template_id: id of the template, str
:param from_account: Account
:return: bool |
def list_repos(**kwargs):
    '''
    Lists all repos in the sources.list (and sources.lists.d) files

    CLI Example:

    .. code-block:: bash

       salt '*' pkg.list_repos
       salt '*' pkg.list_repos disabled=True
    '''
    # NOTE(review): **kwargs (e.g. disabled=True) is accepted but not used
    # in this body — TODO confirm whether filtering happens upstream.
    _check_apt()
    all_repos = {}
    for source in sourceslist.SourcesList().list:
        if _skip_source(source):
            continue
        entry = {
            'file': source.file,
            'comps': getattr(source, 'comps', []),
            'disabled': source.disabled,
            # 'enabled' mirrors 'disabled' for compatibility with the
            # other pkg modules.
            'enabled': not source.disabled,
            'dist': source.dist,
            'type': source.type,
            'uri': source.uri.rstrip('/'),
            'line': salt.utils.pkg.deb.strip_uri(source.line.strip()),
            'architectures': getattr(source, 'architectures', []),
        }
        # Group entries by the source URI exactly as given (not rstripped).
        all_repos.setdefault(source.uri, []).append(entry)
    return all_repos
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
salt '*' pkg.list_repos disabled=True |
def write_resume_point(self):
    """Keeps a list of the number of iterations that were in a file when a
    run was resumed from a checkpoint."""
    # Load previously recorded resume points, defaulting to an empty list.
    if "resume_points" in self.attrs:
        checkpoints = self.attrs["resume_points"].tolist()
    else:
        checkpoints = []
    # niterations may not be readable yet on a fresh file; treat that as 0.
    try:
        current = self.niterations
    except KeyError:
        current = 0
    checkpoints.append(current)
    self.attrs["resume_points"] = checkpoints
run was resumed from a checkpoint. |
def get_ip_address(domain):
    """
    Get IP address for given `domain`. Try to do smart parsing.

    Args:
        domain (str): Domain or URL.

    Returns:
        str: IP address.

    Raises:
        ValueError: If can't parse the domain.
    """
    # Ensure there is a scheme so urlparse puts the host into netloc/hostname.
    if "://" not in domain:
        domain = "http://" + domain

    # Use .hostname instead of .netloc: .netloc keeps any ":port" (and
    # userinfo) suffix, which would make gethostbyname() fail for inputs
    # like "http://example.com:8080".
    hostname = urlparse(domain).hostname

    if not hostname:
        raise ValueError("Can't parse hostname!")

    return socket.gethostbyname(hostname)
def list_():
    '''
    List the RAID devices.

    CLI Example:

    .. code-block:: bash

        salt '*' raid.list
    '''
    ret = {}
    for line in (__salt__['cmd.run_stdout']
                 (['mdadm', '--detail', '--scan'],
                  python_shell=False).splitlines()):
        # Skip lines without key=value details (e.g. blank/continuation).
        if ' ' not in line:
            continue
        comps = line.split()
        # Format: "ARRAY /dev/mdX key=value key=value ..."
        device = comps[1]
        ret[device] = {"device": device}
        for comp in comps[2:]:
            # Split on the first '=' only, so values that themselves
            # contain '=' are not truncated.
            fields = comp.split('=', 1)
            ret[device][fields[0].lower()] = fields[1]
    return ret
def run(self, circuit):
    """Run all the passes on a QuantumCircuit

    Args:
        circuit (QuantumCircuit): circuit to transform via all the registered passes

    Returns:
        QuantumCircuit: Transformed circuit.
    """
    original_name = circuit.name
    dag = circuit_to_dag(circuit)
    # Work on the DAG only from here on; drop the circuit reference.
    del circuit

    for passset in self.working_list:
        for pass_ in passset:
            dag = self._do_pass(pass_, dag, passset.options)

    transformed = dag_to_circuit(dag)
    transformed.name = original_name
    return transformed
Args:
circuit (QuantumCircuit): circuit to transform via all the registered passes
Returns:
QuantumCircuit: Transformed circuit. |
def __query_options(self):
    """Get the query options string to use for this query."""
    # Each entry pairs a _QUERY_OPTIONS key with whether it applies here.
    wanted = [
        ("tailable_cursor", self.__tailable),
        ("slave_okay", self.__slave_okay or self.__pool._slave_okay),
        ("no_timeout", not self.__timeout),
    ]
    flags = 0
    for name, enabled in wanted:
        if enabled:
            flags |= _QUERY_OPTIONS[name]
    return flags
def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
estimator = self._postfit_estimator
if not hasattr(estimator, method):
msg = "The wrapped estimator '{}' does not have a '{}' method.".format(
estimator, method
)
raise AttributeError(msg)
return getattr(estimator, method) | Check if self.estimator has 'method'.
Raises
------
AttributeError |
def save(self, data=None, shape=None, dtype=None, returnoffset=False,
         photometric=None, planarconfig=None, extrasamples=None, tile=None,
         contiguous=True, align=16, truncate=False, compress=0,
         rowsperstrip=None, predictor=False, colormap=None,
         description=None, datetime=None, resolution=None, subfiletype=0,
         software='tifffile.py', metadata={}, ijmetadata=None,
         extratags=()):
    """Write numpy array and tags to TIFF file.

    The data shape's last dimensions are assumed to be image depth,
    height (length), width, and samples.
    If a colormap is provided, the data's dtype must be uint8 or uint16
    and the data values are indices into the last dimension of the
    colormap.
    If 'shape' and 'dtype' are specified, an empty array is saved.
    This option cannot be used with compression or multiple tiles.
    Image data are written uncompressed in one strip per plane by default.
    Dimensions larger than 2 to 4 (depending on photometric mode, planar
    configuration, and SGI mode) are flattened and saved as separate pages.
    The SampleFormat and BitsPerSample tags are derived from the data type.

    Parameters
    ----------
    data : numpy.ndarray or None
        Input image array.
    shape : tuple or None
        Shape of the empty array to save. Used only if 'data' is None.
    dtype : numpy.dtype or None
        Data-type of the empty array to save. Used only if 'data' is None.
    returnoffset : bool
        If True and the image data in the file is memory-mappable, return
        the offset and number of bytes of the image data in the file.
    photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'}
        The color space of the image data.
        By default, this setting is inferred from the data shape and the
        value of colormap.
        For CFA images, DNG tags must be specified in 'extratags'.
    planarconfig : {'CONTIG', 'SEPARATE'}
        Specifies if samples are stored interleaved or in separate planes.
        By default, this setting is inferred from the data shape.
        If this parameter is set, extra samples are used to store grayscale
        images.
        'CONTIG': last dimension contains samples.
        'SEPARATE': third last dimension contains samples.
    extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'}
        Defines the interpretation of extra components in pixels.
        'UNSPECIFIED': no transparency information (default).
        'ASSOCALPHA': single, true transparency with pre-multiplied color.
        'UNASSALPHA': independent transparency masks.
    tile : tuple of int
        The shape (depth, length, width) of image tiles to write.
        If None (default), image data are written in strips.
        The tile length and width must be a multiple of 16.
        If the tile depth is provided, the SGI ImageDepth and TileDepth
        tags are used to save volume data.
        Unless a single tile is used, tiles cannot be used to write
        contiguous files.
        Few software can read the SGI format, e.g. MeVisLab.
    contiguous : bool
        If True (default) and the data and parameters are compatible with
        previous ones, if any, the image data are stored contiguously after
        the previous one. In that case, 'photometric', 'planarconfig',
        'rowsperstrip', are ignored. Metadata such as 'description',
        'metadata', 'datetime', and 'extratags' are written to the first
        page of a contiguous series only.
    align : int
        Byte boundary on which to align the image data in the file.
        Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data.
        Following contiguous writes are not aligned.
    truncate : bool
        If True, only write the first page including shape metadata if
        possible (uncompressed, contiguous, not tiled).
        Other TIFF readers will only be able to read part of the data.
    compress : int or str or (str, int)
        If 0 (default), data are written uncompressed.
        If 0-9, the level of ADOBE_DEFLATE compression.
        If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'.
        If a tuple, first item is one of TIFF.COMPRESSION and second item
        is compression level.
        Compression cannot be used to write contiguous files.
    rowsperstrip : int
        The number of rows per strip. By default strips will be ~64 KB
        if compression is enabled, else rowsperstrip is set to the image
        length. Bilevel images are always stored in one strip per plane.
    predictor : bool
        If True, apply horizontal differencing or floating-point predictor
        before compression.
    colormap : numpy.ndarray
        RGB color values for the corresponding data value.
        Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
    description : str
        The subject of the image. Must be 7-bit ASCII. Cannot be used with
        the ImageJ format. Saved with the first page only.
    datetime : datetime, str, or bool
        Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or
        datetime object. Else if True, the current date and time is used.
        Saved with the first page only.
    resolution : (float, float[, str]) or ((int, int), (int, int)[, str])
        X and Y resolutions in pixels per resolution unit as float or
        rational numbers. A third, optional parameter specifies the
        resolution unit, which must be None (default for ImageJ),
        'INCH' (default), or 'CENTIMETER'.
    subfiletype : int
        Bitfield to indicate the kind of data. Set bit 0 if the image
        is a reduced-resolution version of another image. Set bit 1 if
        the image is part of a multi-page image. Set bit 2 if the image
        is transparency mask for another image (photometric must be
        MASK, SamplesPerPixel and BitsPerSample must be 1).
    software : str
        Name of the software used to create the file. Must be 7-bit ASCII.
        Saved with the first page only.
    metadata : dict
        Additional metadata to be saved along with shape information
        in JSON or ImageJ formats in an ImageDescription tag.
        If None, do not write a second ImageDescription tag.
        Strings must be 7-bit ASCII. Saved with the first page only.
    ijmetadata : dict
        Additional metadata to be saved in application specific
        IJMetadata and IJMetadataByteCounts tags. Refer to the
        imagej_metadata_tag function for valid keys and values.
        Saved with the first page only.
    extratags : sequence of tuples
        Additional tags as [(code, dtype, count, value, writeonce)].

        code : int
            The TIFF tag Id.
        dtype : str
            Data type of items in 'value' in Python struct format.
            One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q.
        count : int
            Number of data values. Not used for string or byte string
            values.
        value : sequence
            'Count' values compatible with 'dtype'.
            Byte strings must contain count values of dtype packed as
            binary data.
        writeonce : bool
            If True, the tag is written to the first page only.

    """
    # TODO: refactor this function
    fh = self._fh
    byteorder = self._byteorder

    # Determine the data shape/dtype, either from the given array or from
    # the explicit shape/dtype arguments (empty-file mode).
    if data is None:
        if compress:
            raise ValueError('cannot save compressed empty file')
        datashape = shape
        datadtype = numpy.dtype(dtype).newbyteorder(byteorder)
        datadtypechar = datadtype.char
    else:
        data = numpy.asarray(data, byteorder+data.dtype.char, 'C')
        if data.size == 0:
            raise ValueError('cannot save empty array')
        datashape = data.shape
        datadtype = data.dtype
        datadtypechar = data.dtype.char

    returnoffset = returnoffset and datadtype.isnative
    # Boolean arrays are written as 1-bit (bilevel) images.
    bilevel = datadtypechar == '?'
    if bilevel:
        index = -1 if datashape[-1] > 1 else -2
        datasize = product(datashape[:index])
        # Bits are packed 8-per-byte along the row axis; round rows up.
        if datashape[index] % 8:
            datasize *= datashape[index] // 8 + 1
        else:
            datasize *= datashape[index] // 8
    else:
        datasize = product(datashape) * datadtype.itemsize

    # just append contiguous data if possible
    self._truncate = bool(truncate)
    if self._datashape:
        if (not contiguous
                or self._datashape[1:] != datashape
                or self._datadtype != datadtype
                or (compress and self._tags)
                or tile
                or not numpy.array_equal(colormap, self._colormap)):
            # incompatible shape, dtype, compression mode, or colormap
            self._write_remaining_pages()
            self._write_image_description()
            self._truncate = False
            self._descriptionoffset = 0
            self._descriptionlenoffset = 0
            self._datashape = None
            self._colormap = None
            if self._imagej:
                raise ValueError(
                    'ImageJ does not support non-contiguous data')
        else:
            # consecutive mode
            self._datashape = (self._datashape[0] + 1,) + datashape
            if not compress:
                # write contiguous data, write IFDs/tags later
                offset = fh.tell()
                if data is None:
                    fh.write_empty(datasize)
                else:
                    fh.write_array(data)
                if returnoffset:
                    return offset, datasize
                return None

    input_shape = datashape
    tagnoformat = self._tagnoformat
    valueformat = self._valueformat
    offsetformat = self._offsetformat
    offsetsize = self._offsetsize
    tagsize = self._tagsize

    MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK
    MINISWHITE = TIFF.PHOTOMETRIC.MINISWHITE
    RGB = TIFF.PHOTOMETRIC.RGB
    CFA = TIFF.PHOTOMETRIC.CFA
    PALETTE = TIFF.PHOTOMETRIC.PALETTE
    CONTIG = TIFF.PLANARCONFIG.CONTIG
    SEPARATE = TIFF.PLANARCONFIG.SEPARATE

    # parse input
    if photometric is not None:
        photometric = enumarg(TIFF.PHOTOMETRIC, photometric)
    if planarconfig:
        planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig)
    if extrasamples is None:
        extrasamples_ = None
    else:
        extrasamples_ = tuple(enumarg(TIFF.EXTRASAMPLE, es)
                              for es in sequence(extrasamples))
    if not compress:
        compress = False
        compresstag = 1
        # TODO: support predictors without compression
        predictor = False
        predictortag = 1
    else:
        if isinstance(compress, (tuple, list)):
            compress, compresslevel = compress
        elif isinstance(compress, int):
            compress, compresslevel = 'ADOBE_DEFLATE', int(compress)
            if not 0 <= compresslevel <= 9:
                raise ValueError('invalid compression level %s' % compress)
        else:
            compresslevel = None
        compress = compress.upper()
        compresstag = enumarg(TIFF.COMPRESSION, compress)
    if predictor:
        # Integer data uses horizontal differencing (2); floats use the
        # floating-point predictor (3).
        if datadtype.kind in 'iu':
            predictortag = 2
            predictor = TIFF.PREDICTORS[2]
        elif datadtype.kind == 'f':
            predictortag = 3
            predictor = TIFF.PREDICTORS[3]
        else:
            raise ValueError('cannot apply predictor to %s' % datadtype)

    # prepare ImageJ format
    if self._imagej:
        # if predictor or compress:
        #     warnings.warn(
        #         'ImageJ cannot handle predictors or compression')
        if description:
            warnings.warn('not writing description to ImageJ file')
            description = None
        volume = False
        if datadtypechar not in 'BHhf':
            raise ValueError(
                'ImageJ does not support data type %s' % datadtypechar)
        ijrgb = photometric == RGB if photometric else None
        if datadtypechar not in 'B':
            ijrgb = False
        ijshape = imagej_shape(datashape, ijrgb)
        if ijshape[-1] in (3, 4):
            photometric = RGB
            if datadtypechar not in 'B':
                raise ValueError('ImageJ does not support data type %s '
                                 'for RGB' % datadtypechar)
        elif photometric is None:
            photometric = MINISBLACK
            planarconfig = None
        if planarconfig == SEPARATE:
            raise ValueError('ImageJ does not support planar images')
        planarconfig = CONTIG if ijrgb else None

    # define compress function
    if compress:
        compressor = TIFF.COMPESSORS[compresstag]
        if predictor:
            def compress(data, level=compresslevel):
                data = predictor(data, axis=-2)
                return compressor(data, level)
        else:
            def compress(data, level=compresslevel):
                return compressor(data, level)

    # verify colormap and indices
    if colormap is not None:
        if datadtypechar not in 'BH':
            raise ValueError('invalid data dtype for palette mode')
        colormap = numpy.asarray(colormap, dtype=byteorder+'H')
        if colormap.shape != (3, 2**(datadtype.itemsize * 8)):
            raise ValueError('invalid color map shape')
        self._colormap = colormap

    # verify tile shape
    if tile:
        tile = tuple(int(i) for i in tile[:3])
        volume = len(tile) == 3
        if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or
                any(i < 1 for i in tile)):
            raise ValueError('invalid tile shape')
    else:
        tile = ()
        volume = False

    # normalize data shape to 5D or 6D, depending on volume:
    #   (pages, planar_samples, [depth,] height, width, contig_samples)
    datashape = reshape_nd(datashape, 3 if photometric == RGB else 2)
    shape = datashape
    ndim = len(datashape)

    samplesperpixel = 1
    extrasamples = 0
    if volume and ndim < 3:
        volume = False
    if colormap is not None:
        photometric = PALETTE
        planarconfig = None
    if photometric is None:
        # Infer the photometric interpretation from the data shape.
        photometric = MINISBLACK
        if bilevel:
            photometric = MINISWHITE
        elif planarconfig == CONTIG:
            if ndim > 2 and shape[-1] in (3, 4):
                photometric = RGB
        elif planarconfig == SEPARATE:
            if volume and ndim > 3 and shape[-4] in (3, 4):
                photometric = RGB
            elif ndim > 2 and shape[-3] in (3, 4):
                photometric = RGB
        elif ndim > 2 and shape[-1] in (3, 4):
            photometric = RGB
        elif self._imagej:
            photometric = MINISBLACK
        elif volume and ndim > 3 and shape[-4] in (3, 4):
            photometric = RGB
        elif ndim > 2 and shape[-3] in (3, 4):
            photometric = RGB
    if planarconfig and len(shape) <= (3 if volume else 2):
        planarconfig = None
        if photometric not in (0, 1, 3, 4):
            photometric = MINISBLACK
    if photometric == RGB:
        if len(shape) < 3:
            raise ValueError('not a RGB(A) image')
        if len(shape) < 4:
            volume = False
        if planarconfig is None:
            # Guess whether samples are interleaved or planar from which
            # axis holds 3 or 4 components.
            if shape[-1] in (3, 4):
                planarconfig = CONTIG
            elif shape[-4 if volume else -3] in (3, 4):
                planarconfig = SEPARATE
            elif shape[-1] > shape[-4 if volume else -3]:
                planarconfig = SEPARATE
            else:
                planarconfig = CONTIG
        if planarconfig == CONTIG:
            datashape = (-1, 1) + shape[(-4 if volume else -3):]
            samplesperpixel = datashape[-1]
        else:
            datashape = (-1,) + shape[(-4 if volume else -3):] + (1,)
            samplesperpixel = datashape[1]
        if samplesperpixel > 3:
            extrasamples = samplesperpixel - 3
    elif photometric == CFA:
        if len(shape) != 2:
            raise ValueError('invalid CFA image')
        volume = False
        planarconfig = None
        datashape = (-1, 1) + shape[-2:] + (1,)
        if 50706 not in (et[0] for et in extratags):
            raise ValueError('must specify DNG tags for CFA image')
    elif planarconfig and len(shape) > (3 if volume else 2):
        if planarconfig == CONTIG:
            datashape = (-1, 1) + shape[(-4 if volume else -3):]
            samplesperpixel = datashape[-1]
        else:
            datashape = (-1,) + shape[(-4 if volume else -3):] + (1,)
            samplesperpixel = datashape[1]
        extrasamples = samplesperpixel - 1
    else:
        planarconfig = None
        while len(shape) > 2 and shape[-1] == 1:
            shape = shape[:-1]  # remove trailing 1s
        if len(shape) < 3:
            volume = False
        if extrasamples_ is None:
            datashape = (-1, 1) + shape[(-3 if volume else -2):] + (1,)
        else:
            datashape = (-1, 1) + shape[(-4 if volume else -3):]
            samplesperpixel = datashape[-1]
            extrasamples = samplesperpixel - 1

    if subfiletype & 0b100:
        # FILETYPE_MASK
        if not (bilevel and samplesperpixel == 1 and
                photometric in (0, 1, 4)):
            raise ValueError('invalid SubfileType MASK')
        photometric = TIFF.PHOTOMETRIC.MASK

    # normalize shape to 6D
    assert len(datashape) in (5, 6)
    if len(datashape) == 5:
        datashape = datashape[:2] + (1,) + datashape[2:]
    if datashape[0] == -1:
        # Derive the number of pages from the total element count.
        s0 = product(input_shape) // product(datashape[1:])
        datashape = (s0,) + datashape[1:]
    shape = datashape
    if data is not None:
        data = data.reshape(shape)

    if tile and not volume:
        # filter duplicate axis for non-volume tiles
        tile = (1, tile[-2], tile[-1])

    if photometric == PALETTE:
        if (samplesperpixel != 1 or extrasamples or
                shape[1] != 1 or shape[-1] != 1):
            raise ValueError('invalid data shape for palette mode')

    if photometric == RGB and samplesperpixel == 2:
        raise ValueError('not a RGB image (samplesperpixel=2)')

    if bilevel:
        if compresstag not in (1, 32773):
            raise ValueError('cannot compress bilevel image')
        if tile:
            raise ValueError('cannot save tiled bilevel image')
        if photometric not in (0, 1, 4):
            raise ValueError('cannot save bilevel image as %s' %
                             str(photometric))
        datashape = list(datashape)
        if datashape[-2] % 8:
            datashape[-2] = datashape[-2] // 8 + 1
        else:
            datashape[-2] = datashape[-2] // 8
        datashape = tuple(datashape)
        assert datasize == product(datashape)
        if data is not None:
            data = numpy.packbits(data, axis=-2)
            assert datashape[-2] == data.shape[-2]

    # bytestr converts str to ascii bytes on Python 3; identity-ish on 2.
    bytestr = bytes if sys.version[0] == '2' else (
        lambda x: bytes(x, 'ascii') if isinstance(x, str) else x)
    tags = []  # list of (code, ifdentry, ifdvalue, writeonce)

    strip_or_tile = 'Tile' if tile else 'Strip'
    tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + 'ByteCounts']
    tagoffsets = TIFF.TAG_NAMES[strip_or_tile + 'Offsets']
    self._tagoffsets = tagoffsets

    def pack(fmt, *val):
        # Pack values using the file's byte order.
        return struct.pack(byteorder+fmt, *val)

    def addtag(code, dtype, count, value, writeonce=False):
        # Compute ifdentry & ifdvalue bytes from code, dtype, count, value
        # Append (code, ifdentry, ifdvalue, writeonce) to tags list
        code = int(TIFF.TAG_NAMES.get(code, code))
        try:
            tifftype = TIFF.DATA_DTYPES[dtype]
        except KeyError:
            raise ValueError('unknown dtype %s' % dtype)
        rawcount = count
        if dtype == 's':
            # strings; NUL-terminate and measure
            value = bytestr(value) + b'\0'
            count = rawcount = len(value)
            rawcount = value.find(b'\0\0')
            if rawcount < 0:
                rawcount = count
            else:
                rawcount += 1  # length of string without buffer
            value = (value,)
        elif isinstance(value, bytes):
            # packed binary data
            dtsize = struct.calcsize(dtype)
            if len(value) % dtsize:
                raise ValueError('invalid packed binary data')
            count = len(value) // dtsize
        if len(dtype) > 1:
            # e.g. '2I' rational: two values per count
            count *= int(dtype[:-1])
            dtype = dtype[-1]
        ifdentry = [pack('HH', code, tifftype),
                    pack(offsetformat, rawcount)]
        ifdvalue = None
        if struct.calcsize(dtype) * count <= offsetsize:
            # value(s) can be written directly
            if isinstance(value, bytes):
                ifdentry.append(pack(valueformat, value))
            elif count == 1:
                if isinstance(value, (tuple, list, numpy.ndarray)):
                    value = value[0]
                ifdentry.append(pack(valueformat, pack(dtype, value)))
            else:
                ifdentry.append(pack(valueformat,
                                     pack(str(count)+dtype, *value)))
        else:
            # use offset to value(s)
            ifdentry.append(pack(offsetformat, 0))
            if isinstance(value, bytes):
                ifdvalue = value
            elif isinstance(value, numpy.ndarray):
                assert value.size == count
                assert value.dtype.char == dtype
                ifdvalue = value.tostring()
            elif isinstance(value, (tuple, list)):
                ifdvalue = pack(str(count)+dtype, *value)
            else:
                ifdvalue = pack(dtype, value)
        tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))

    def rational(arg, max_denominator=1000000):
        """Return nominator and denominator from float or two integers."""
        from fractions import Fraction  # delayed import
        try:
            f = Fraction.from_float(arg)
        except TypeError:
            f = Fraction(arg[0], arg[1])
        f = f.limit_denominator(max_denominator)
        return f.numerator, f.denominator

    if description:
        # user provided description
        addtag('ImageDescription', 's', 0, description, writeonce=True)

    # write shape and metadata to ImageDescription
    self._metadata = {} if not metadata else metadata.copy()
    if self._imagej:
        description = imagej_description(
            input_shape, shape[-1] in (3, 4), self._colormap is not None,
            **self._metadata)
    elif metadata or metadata == {}:
        if self._truncate:
            self._metadata.update(truncated=True)
        description = json_description(input_shape, **self._metadata)
    # elif metadata is None and self._truncate:
    #     raise ValueError('cannot truncate without writing metadata')
    else:
        description = None
    if description:
        # add 64 bytes buffer
        # the image description might be updated later with the final shape
        description = str2bytes(description, 'ascii')
        description += b'\0' * 64
        self._descriptionlen = len(description)
        addtag('ImageDescription', 's', 0, description, writeonce=True)

    if software:
        addtag('Software', 's', 0, software, writeonce=True)
    if datetime:
        if isinstance(datetime, str):
            if len(datetime) != 19 or datetime[16] != ':':
                raise ValueError('invalid datetime string')
        else:
            try:
                datetime = datetime.strftime('%Y:%m:%d %H:%M:%S')
            except AttributeError:
                datetime = self._now().strftime('%Y:%m:%d %H:%M:%S')
        addtag('DateTime', 's', 0, datetime, writeonce=True)
    addtag('Compression', 'H', 1, compresstag)
    if predictor:
        addtag('Predictor', 'H', 1, predictortag)
    addtag('ImageWidth', 'I', 1, shape[-2])
    addtag('ImageLength', 'I', 1, shape[-3])
    if tile:
        addtag('TileWidth', 'I', 1, tile[-1])
        addtag('TileLength', 'I', 1, tile[-2])
        if tile[0] > 1:
            # SGI volume extension tags
            addtag('ImageDepth', 'I', 1, shape[-4])
            addtag('TileDepth', 'I', 1, tile[0])
    addtag('NewSubfileType', 'I', 1, subfiletype)
    if not bilevel:
        sampleformat = {'u': 1, 'i': 2, 'f': 3, 'c': 6}[datadtype.kind]
        addtag('SampleFormat', 'H', samplesperpixel,
               (sampleformat,) * samplesperpixel)
    addtag('PhotometricInterpretation', 'H', 1, photometric.value)
    if colormap is not None:
        addtag('ColorMap', 'H', colormap.size, colormap)
    addtag('SamplesPerPixel', 'H', 1, samplesperpixel)
    if bilevel:
        pass
    elif planarconfig and samplesperpixel > 1:
        addtag('PlanarConfiguration', 'H', 1, planarconfig.value)
        addtag('BitsPerSample', 'H', samplesperpixel,
               (datadtype.itemsize * 8,) * samplesperpixel)
    else:
        addtag('BitsPerSample', 'H', 1, datadtype.itemsize * 8)
    if extrasamples:
        if extrasamples_ is not None:
            if extrasamples != len(extrasamples_):
                raise ValueError('wrong number of extrasamples specified')
            addtag('ExtraSamples', 'H', extrasamples, extrasamples_)
        elif photometric == RGB and extrasamples == 1:
            # Unassociated alpha channel
            addtag('ExtraSamples', 'H', 1, 2)
        else:
            # Unspecified alpha channel
            addtag('ExtraSamples', 'H', extrasamples, (0,) * extrasamples)
    if resolution is not None:
        addtag('XResolution', '2I', 1, rational(resolution[0]))
        addtag('YResolution', '2I', 1, rational(resolution[1]))
        if len(resolution) > 2:
            unit = resolution[2]
            unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit)
        elif self._imagej:
            unit = 1
        else:
            unit = 2
        addtag('ResolutionUnit', 'H', 1, unit)
    elif not self._imagej:
        addtag('XResolution', '2I', 1, (1, 1))
        addtag('YResolution', '2I', 1, (1, 1))
        addtag('ResolutionUnit', 'H', 1, 1)
    if ijmetadata:
        for t in imagej_metadata_tag(ijmetadata, byteorder):
            addtag(*t)

    contiguous = not compress
    if tile:
        # one chunk per tile per plane
        tiles = ((shape[2] + tile[0] - 1) // tile[0],
                 (shape[3] + tile[1] - 1) // tile[1],
                 (shape[4] + tile[2] - 1) // tile[2])
        numtiles = product(tiles) * shape[1]
        databytecounts = [
            product(tile) * shape[-1] * datadtype.itemsize] * numtiles
        addtag(tagbytecounts, offsetformat, numtiles, databytecounts)
        addtag(tagoffsets, offsetformat, numtiles, [0] * numtiles)
        contiguous = contiguous and product(tiles) == 1
        if not contiguous:
            # allocate tile buffer
            chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype)
    elif contiguous and (bilevel or rowsperstrip is None):
        # one strip per plane
        if bilevel:
            databytecounts = [product(datashape[2:])] * shape[1]
        else:
            databytecounts = [
                product(datashape[2:]) * datadtype.itemsize] * shape[1]
        addtag(tagbytecounts, offsetformat, shape[1], databytecounts)
        addtag(tagoffsets, offsetformat, shape[1], [0] * shape[1])
        addtag('RowsPerStrip', 'I', 1, shape[-3])
    else:
        # use rowsperstrip
        rowsize = product(shape[-2:]) * datadtype.itemsize
        if rowsperstrip is None:
            # compress ~64 KB chunks by default
            rowsperstrip = 65536 // rowsize if compress else shape[-3]
        if rowsperstrip < 1:
            rowsperstrip = 1
        elif rowsperstrip > shape[-3]:
            rowsperstrip = shape[-3]
        addtag('RowsPerStrip', 'I', 1, rowsperstrip)

        numstrips1 = (shape[-3] + rowsperstrip - 1) // rowsperstrip
        numstrips = numstrips1 * shape[1]
        if compress:
            databytecounts = [0] * numstrips
        else:
            # TODO: save bilevel data with rowsperstrip
            stripsize = rowsperstrip * rowsize
            databytecounts = [stripsize] * numstrips
            # The last strip of each plane may be shorter.
            stripsize -= rowsize * (numstrips1 * rowsperstrip - shape[-3])
            for i in range(numstrips1-1, numstrips, numstrips1):
                databytecounts[i] = stripsize
        addtag(tagbytecounts, offsetformat, numstrips, databytecounts)
        addtag(tagoffsets, offsetformat, numstrips, [0] * numstrips)

    if data is None and not contiguous:
        raise ValueError('cannot write non-contiguous empty file')

    # add extra tags from user
    for t in extratags:
        addtag(*t)

    # TODO: check TIFFReadDirectoryCheckOrder warning in files containing
    #   multiple tags of same code
    # the entries in an IFD must be sorted in ascending order by tag code
    tags = sorted(tags, key=lambda x: x[0])

    fhpos = fh.tell()
    if not (self._bigtiff or self._imagej) and fhpos + datasize > 2**32-1:
        raise ValueError('data too large for standard TIFF file')

    # if not compressed or multi-tiled, write the first IFD and then
    # all data contiguously; else, write all IFDs and data interleaved
    for pageindex in range(1 if contiguous else shape[0]):

        ifdpos = fhpos
        if ifdpos % 2:
            # location of IFD must begin on a word boundary
            fh.write(b'\0')
            ifdpos += 1

        # update pointer at ifdoffset
        fh.seek(self._ifdoffset)
        fh.write(pack(offsetformat, ifdpos))
        fh.seek(ifdpos)

        # create IFD in memory
        if pageindex < 2:
            ifd = io.BytesIO()
            ifd.write(pack(tagnoformat, len(tags)))
            tagoffset = ifd.tell()
            ifd.write(b''.join(t[1] for t in tags))
            ifdoffset = ifd.tell()
            ifd.write(pack(offsetformat, 0))  # offset to next IFD
            # write tag values and patch offsets in ifdentries
            for tagindex, tag in enumerate(tags):
                offset = tagoffset + tagindex * tagsize + offsetsize + 4
                code = tag[0]
                value = tag[2]
                if value:
                    pos = ifd.tell()
                    if pos % 2:
                        # tag value is expected to begin on word boundary
                        ifd.write(b'\0')
                        pos += 1
                    ifd.seek(offset)
                    ifd.write(pack(offsetformat, ifdpos + pos))
                    ifd.seek(pos)
                    ifd.write(value)
                    if code == tagoffsets:
                        dataoffsetsoffset = offset, pos
                    elif code == tagbytecounts:
                        databytecountsoffset = offset, pos
                    elif code == 270 and value.endswith(b'\0\0\0\0'):
                        # image description buffer
                        self._descriptionoffset = ifdpos + pos
                        self._descriptionlenoffset = (
                            ifdpos + tagoffset + tagindex*tagsize + 4)
                elif code == tagoffsets:
                    dataoffsetsoffset = offset, None
                elif code == tagbytecounts:
                    databytecountsoffset = offset, None
            ifdsize = ifd.tell()
            if ifdsize % 2:
                ifd.write(b'\0')
                ifdsize += 1

        # write IFD later when strip/tile bytecounts and offsets are known
        fh.seek(ifdsize, 1)

        # write image data
        dataoffset = fh.tell()
        skip = align - dataoffset % align
        fh.seek(skip, 1)
        dataoffset += skip
        if contiguous:
            if data is None:
                fh.write_empty(datasize)
            else:
                fh.write_array(data)
        elif tile:
            if data is None:
                fh.write_empty(numtiles * databytecounts[0])
            else:
                stripindex = 0
                for plane in data[pageindex]:
                    for tz in range(tiles[0]):
                        for ty in range(tiles[1]):
                            for tx in range(tiles[2]):
                                # Copy the (possibly partial) tile into the
                                # zero-padded tile buffer before writing.
                                c0 = min(tile[0], shape[2] - tz*tile[0])
                                c1 = min(tile[1], shape[3] - ty*tile[1])
                                c2 = min(tile[2], shape[4] - tx*tile[2])
                                chunk[c0:, c1:, c2:] = 0
                                chunk[:c0, :c1, :c2] = plane[
                                    tz*tile[0]:tz*tile[0]+c0,
                                    ty*tile[1]:ty*tile[1]+c1,
                                    tx*tile[2]:tx*tile[2]+c2]
                                if compress:
                                    t = compress(chunk)
                                    fh.write(t)
                                    databytecounts[stripindex] = len(t)
                                    stripindex += 1
                                else:
                                    fh.write_array(chunk)
                                    # fh.flush()
        elif compress:
            # write one strip per rowsperstrip
            assert data.shape[2] == 1  # not handling depth
            numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip
            stripindex = 0
            for plane in data[pageindex]:
                for i in range(numstrips):
                    strip = plane[0, i*rowsperstrip: (i+1)*rowsperstrip]
                    strip = compress(strip)
                    fh.write(strip)
                    databytecounts[stripindex] = len(strip)
                    stripindex += 1
        else:
            fh.write_array(data[pageindex])

        # update strip/tile offsets
        offset, pos = dataoffsetsoffset
        ifd.seek(offset)
        if pos:
            ifd.write(pack(offsetformat, ifdpos + pos))
            ifd.seek(pos)
            offset = dataoffset
            for size in databytecounts:
                ifd.write(pack(offsetformat, offset))
                offset += size
        else:
            ifd.write(pack(offsetformat, dataoffset))
        if compress:
            # update strip/tile bytecounts
            offset, pos = databytecountsoffset
            ifd.seek(offset)
            if pos:
                ifd.write(pack(offsetformat, ifdpos + pos))
                ifd.seek(pos)
                for size in databytecounts:
                    ifd.write(pack(offsetformat, size))
            else:
                ifd.write(pack(offsetformat, databytecounts[0]))
        fhpos = fh.tell()
        # Flush the in-memory IFD to the file at its reserved position.
        fh.seek(ifdpos)
        fh.write(iogetbuffer(ifd))
        fh.flush()
        fh.seek(fhpos)
        self._ifdoffset = ifdpos + ifdoffset

        # remove tags that should be written only once
        if pageindex == 0:
            tags = [tag for tag in tags if not tag[-1]]

    self._shape = shape
    self._datashape = (1,) + input_shape
    self._datadtype = datadtype
    self._dataoffset = dataoffset
    self._databytecounts = databytecounts

    if contiguous:
        # write remaining IFDs/tags later
        self._tags = tags
        # return offset and size of image data
        if returnoffset:
            return dataoffset, sum(databytecounts)
    return None
The data shape's last dimensions are assumed to be image depth,
height (length), width, and samples.
If a colormap is provided, the data's dtype must be uint8 or uint16
and the data values are indices into the last dimension of the
colormap.
If 'shape' and 'dtype' are specified, an empty array is saved.
This option cannot be used with compression or multiple tiles.
Image data are written uncompressed in one strip per plane by default.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The SampleFormat and BitsPerSample tags are derived from the data type.
Parameters
----------
data : numpy.ndarray or None
Input image array.
shape : tuple or None
Shape of the empty array to save. Used only if 'data' is None.
dtype : numpy.dtype or None
Data-type of the empty array to save. Used only if 'data' is None.
returnoffset : bool
If True and the image data in the file is memory-mappable, return
the offset and number of bytes of the image data in the file.
photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'}
The color space of the image data.
By default, this setting is inferred from the data shape and the
value of colormap.
For CFA images, DNG tags must be specified in 'extratags'.
planarconfig : {'CONTIG', 'SEPARATE'}
Specifies if samples are stored interleaved or in separate planes.
By default, this setting is inferred from the data shape.
If this parameter is set, extra samples are used to store grayscale
images.
'CONTIG': last dimension contains samples.
'SEPARATE': third last dimension contains samples.
extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'}
Defines the interpretation of extra components in pixels.
'UNSPECIFIED': no transparency information (default).
'ASSOCALPHA': single, true transparency with pre-multiplied color.
'UNASSALPHA': independent transparency masks.
tile : tuple of int
The shape (depth, length, width) of image tiles to write.
If None (default), image data are written in strips.
The tile length and width must be a multiple of 16.
If the tile depth is provided, the SGI ImageDepth and TileDepth
tags are used to save volume data.
Unless a single tile is used, tiles cannot be used to write
contiguous files.
        Few software packages can read the SGI format, e.g. MeVisLab.
contiguous : bool
If True (default) and the data and parameters are compatible with
previous ones, if any, the image data are stored contiguously after
the previous one. In that case, 'photometric', 'planarconfig',
'rowsperstrip', are ignored. Metadata such as 'description',
'metadata', 'datetime', and 'extratags' are written to the first
page of a contiguous series only.
align : int
Byte boundary on which to align the image data in the file.
Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data.
Following contiguous writes are not aligned.
truncate : bool
If True, only write the first page including shape metadata if
possible (uncompressed, contiguous, not tiled).
Other TIFF readers will only be able to read part of the data.
compress : int or str or (str, int)
If 0 (default), data are written uncompressed.
If 0-9, the level of ADOBE_DEFLATE compression.
If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'.
If a tuple, first item is one of TIFF.COMPRESSION and second item
is compression level.
Compression cannot be used to write contiguous files.
rowsperstrip : int
The number of rows per strip. By default strips will be ~64 KB
if compression is enabled, else rowsperstrip is set to the image
length. Bilevel images are always stored in one strip per plane.
predictor : bool
If True, apply horizontal differencing or floating-point predictor
before compression.
colormap : numpy.ndarray
RGB color values for the corresponding data value.
Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
description : str
The subject of the image. Must be 7-bit ASCII. Cannot be used with
the ImageJ format. Saved with the first page only.
datetime : datetime, str, or bool
Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or
datetime object. Else if True, the current date and time is used.
Saved with the first page only.
resolution : (float, float[, str]) or ((int, int), (int, int)[, str])
X and Y resolutions in pixels per resolution unit as float or
rational numbers. A third, optional parameter specifies the
resolution unit, which must be None (default for ImageJ),
'INCH' (default), or 'CENTIMETER'.
subfiletype : int
Bitfield to indicate the kind of data. Set bit 0 if the image
is a reduced-resolution version of another image. Set bit 1 if
the image is part of a multi-page image. Set bit 2 if the image
is transparency mask for another image (photometric must be
MASK, SamplesPerPixel and BitsPerSample must be 1).
software : str
Name of the software used to create the file. Must be 7-bit ASCII.
Saved with the first page only.
metadata : dict
Additional metadata to be saved along with shape information
in JSON or ImageJ formats in an ImageDescription tag.
If None, do not write a second ImageDescription tag.
Strings must be 7-bit ASCII. Saved with the first page only.
ijmetadata : dict
Additional metadata to be saved in application specific
IJMetadata and IJMetadataByteCounts tags. Refer to the
imagej_metadata_tag function for valid keys and values.
Saved with the first page only.
extratags : sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q.
count : int
Number of data values. Not used for string or byte string
values.
value : sequence
'Count' values compatible with 'dtype'.
Byte strings must contain count values of dtype packed as
binary data.
writeonce : bool
If True, the tag is written to the first page only. |
def simple_spend_p2sh(all_from_pubkeys, from_privkeys_to_use, to_address, to_satoshis,
        change_address=None, min_confirmations=0, api_key=None, coin_symbol='btc'):
    '''
    Simple method to spend from a p2sh (multisig) address.

    all_from_pubkeys is a list of *all* pubkeys for the address in question.
    from_privkeys_to_use is a list of all privkeys that will be used to sign the tx (and no more).
    If the address is a 2-of-3 multisig and you supply 1 (or 3) from_privkeys_to_use this will break.
    Signature takes place locally (client-side) after unsigned transaction is verified.
    Returns the tx_hash of the newly broadcast tx, or None if the broadcast
    step reports errors.
    A change_address *must* be specified, except for a sweep (set to_satoshis = -1)
    Note that this currently only supports compressed private keys.
    '''
    # NOTE(review): input validation relies on `assert`, which is stripped
    # under `python -O`; explicit exceptions would be more robust.
    assert is_valid_coin_symbol(coin_symbol), coin_symbol
    assert isinstance(to_satoshis, int), to_satoshis
    assert api_key, 'api_key required'
    if change_address:
        err_msg = '%s not a valid address for %s' % (change_address, coin_symbol)
        assert is_valid_address_for_coinsymbol(change_address, coin_symbol), err_msg
    else:
        # no change address is only legal when sweeping the full balance
        assert to_satoshis == -1, 'you must supply a change address or sweep'
    err_msg = '%s not a valid address for %s' % (to_address, coin_symbol)
    assert is_valid_address_for_coinsymbol(to_address, coin_symbol), err_msg
    # TODO: calculate from address from pubkeys
    # err_msg = '%s is not a p2sh address' % to_address
    # assert from_address[0] in COIN_SYMBOL_MAPPINGS[coin_symbol]['multisig_prefix_list'], err_msg
    assert isinstance(all_from_pubkeys, (list, tuple))
    assert len(all_from_pubkeys) > 1
    assert isinstance(from_privkeys_to_use, (list, tuple)), from_privkeys_to_use
    # every supplied privkey must correspond to one of the address's pubkeys
    for from_privkey in from_privkeys_to_use:
        from_pubkey = compress(privkey_to_pubkey(from_privkey))
        err_msg = '%s not in %s' % (from_pubkey, all_from_pubkeys)
        assert from_pubkey in all_from_pubkeys
    # e.g. 'multisig-2-of-3' for 2 signing privkeys over 3 total pubkeys
    script_type = 'multisig-%s-of-%s' % (
        len(from_privkeys_to_use),
        len(all_from_pubkeys),
    )
    inputs = [
        {
            'pubkeys': all_from_pubkeys,
            'script_type': script_type,
        },
    ]
    logger.info('inputs: %s' % inputs)
    outputs = [{'address': to_address, 'value': to_satoshis}, ]
    logger.info('outputs: %s' % outputs)
    # will fail loudly if tx doesn't verify client-side
    unsigned_tx = create_unsigned_tx(
        inputs=inputs,
        outputs=outputs,
        # may build with no change address, but if so will verify change in next step
        # done for extra security in case of client-side bug in change address generation
        change_address=change_address,
        coin_symbol=coin_symbol,
        min_confirmations=min_confirmations,
        verify_tosigntx=False,  # will verify in next step
        include_tosigntx=True,
        api_key=api_key,
    )
    logger.info('unsigned_tx: %s' % unsigned_tx)
    if 'errors' in unsigned_tx:
        print('TX Error(s): Tx NOT Signed or Broadcast')
        for error in unsigned_tx['errors']:
            print(error['error'])
        # Abandon
        raise Exception('Build Unsigned TX Error')
    # independent client-side verification of what the server handed back
    tx_is_correct, err_msg = verify_unsigned_tx(
        unsigned_tx=unsigned_tx,
        inputs=None,
        outputs=outputs,
        sweep_funds=bool(to_satoshis == -1),
        change_address=change_address,
        coin_symbol=coin_symbol,
    )
    if not tx_is_correct:
        print(unsigned_tx)  # for debug
        raise Exception('TX Verification Error: %s' % err_msg)
    txs_to_sign, privkey_list, pubkey_list = [], [], []
    for cnt, proposed_input in enumerate(unsigned_tx['tx']['inputs']):
        # confirm that the input matches the all_from_pubkeys
        err_msg = 'Invalid input: %s != %s' % (
            proposed_input['addresses'],
            all_from_pubkeys,
        )
        assert set(proposed_input['addresses']) == set(all_from_pubkeys), err_msg
        # build items to pass to make_tx_signatures; the same tosign hash is
        # signed once per privkey, and the three parallel lists must stay in
        # lock-step so signature N matches privkey/pubkey N
        for from_privkey in from_privkeys_to_use:
            txs_to_sign.append(unsigned_tx['tosign'][cnt])
            privkey_list.append(from_privkey)
            pubkey_list.append(compress(privkey_to_pubkey(from_privkey)))
    logger.info('txs_to_sign: %s' % txs_to_sign)
    # privkeys intentionally kept out of logs:
    # logger.info('privkey_list: %s' % privkey_list)
    logger.info('pubkey_list: %s' % pubkey_list)
    # sign locally
    tx_signatures = make_tx_signatures(
        txs_to_sign=txs_to_sign,
        privkey_list=privkey_list,
        pubkey_list=pubkey_list,
    )
    logger.info('tx_signatures: %s' % tx_signatures)
    # broadcast TX
    broadcasted_tx = broadcast_signed_transaction(
        unsigned_tx=unsigned_tx,
        signatures=tx_signatures,
        pubkeys=pubkey_list,
        coin_symbol=coin_symbol,
        api_key=api_key,
    )
    logger.info('broadcasted_tx: %s' % broadcasted_tx)
    if 'errors' in broadcasted_tx:
        print('TX Error(s): Tx May NOT Have Been Broadcast')
        for error in broadcasted_tx['errors']:
            print(error['error'])
        print(broadcasted_tx)
        # implicit None return on broadcast failure
        return
    return broadcasted_tx['tx']['hash'] | Simple method to spend from a p2sh address.
all_from_pubkeys is a list of *all* pubkeys for the address in question.
from_privkeys_to_use is a list of all privkeys that will be used to sign the tx (and no more).
If the address is a 2-of-3 multisig and you supply 1 (or 3) from_privkeys_to_use this will break.
Signature takes place locally (client-side) after unsigned transaction is verified.
Returns the tx_hash of the newly broadcast tx.
A change_address *must* be specified, except for a sweep (set to_satoshis = -1)
Note that this currently only supports compressed private keys. |
def get_phi_comps_from_recfile(recfile):
    """Read the phi components from a PEST record file by iteration.

    Parameters
    ----------
    recfile : str
        pest record file name

    Returns
    -------
    iters : dict
        nested dictionary of {iteration number: {group: contribution}}
    """
    iiter = 1
    iters = {}
    # use a context manager so the file handle is closed even if parsing
    # raises (the original left the file open)
    with open(recfile, 'r') as f:
        while True:
            line = f.readline()
            if line == '':
                break
            if "starting phi for this iteration" in line.lower() or \
                    "final phi" in line.lower():
                contributions = {}
                while True:
                    line = f.readline()
                    if line == '':
                        break
                    if "contribution to phi" not in line.lower():
                        # first non-contribution line closes this iteration's block
                        iters[iiter] = contributions
                        iiter += 1
                        break
                    raw = line.strip().split()
                    val = float(raw[-1])
                    # group name is the third-to-last token, quotes stripped
                    group = raw[-3].lower().replace('\"', '')
                    contributions[group] = val
    return iters
Parameters
----------
recfile : str
pest record file name
Returns
-------
iters : dict
nested dictionary of iteration number, {group,contribution} |
def _init_sub_groups(self, parent):
    """
    Initialise sub-groups under *parent*, creating any that do not
    already exist on disk, then initialise children for each leaf node.
    """
    if not self._sub_groups:
        # no sub-groups configured: initialise children directly on parent
        self._init_children(parent)
        return
    for sub_group in self._sub_groups:
        for component in split_path_components(sub_group):
            component_path = os.path.join(parent.full_path, component)
            if os.path.exists(component_path):
                # cgroup directory already present: wrap it in a Node
                node = Node(name=component, parent=parent)
                parent.children.append(node)
            else:
                node = parent.create_cgroup(component)
            # descend: the new node becomes the parent for the next component
            parent = node
        self._init_children(node)
def orbit(self, x1_px, y1_px, x2_px, y2_px):
    """
    Rotate the camera around the target point ("orbiting", also known
    as "tumbling" in some software packages), driven by a mouse drag
    from (x1_px, y1_px) to (x2_px, y2_px).
    """
    # convert pixels of mouse travel into radians of rotation
    px_per_deg = self.vport_radius_px / float(self.orbit_speed)
    rad_per_px = (1.0 / px_per_deg) * (np.pi / 180.0)
    target_to_pos = self.position - self.target
    # horizontal drag: rotate around self.ground
    yaw = Matrix4x4.rotation_around_origin((x1_px - x2_px) * rad_per_px,
                                           self.ground)
    target_to_pos = yaw * target_to_pos
    self.up = yaw * self.up
    # vertical drag: rotate around the camera's right axis (up x offset)
    right_axis = (self.up ^ target_to_pos).normalized()
    pitch = Matrix4x4.rotation_around_origin((y1_px - y2_px) * rad_per_px,
                                             right_axis)
    target_to_pos = pitch * target_to_pos
    self.up = pitch * self.up
    self.position = self.target + target_to_pos
This is also called "tumbling" in some software packages. |
def count_residues(self, record, pdb_record):
    '''Count the number of residues in the chain for the case.

    Expects all mutations in the record to target a single chain
    (monomeric case) and returns the length of that chain's sequence
    in pdb_record (0 if absent).
    '''
    mutations = self.get_record_mutations(record)
    chains = {m['Chain'] for m in mutations}
    assert(len(chains) == 1)  # we expect monomeric cases
    chain_id = chains.pop()
    sequence = pdb_record.get('Chains', {}).get(chain_id, {}).get('Sequence', '')
    return len(sequence)
def temp_water(self):
    """Return the 82.5th percentile of tirs1 over clear-sky water pixels.

    Implements equations 7 and 8 of Zhu and Woodcock (2012): water
    pixels (from water_test) with SWIR2 below a fixed threshold are
    treated as clear-sky water; masked-out pixels are excluded.

    Output
    ------
    float:
        82.5th percentile temperature over water
    """
    SWIR2_THRESHOLD = 0.03  # eq. 7
    is_water = self.water_test()
    clear_sky_water = is_water & (self.swir2 < SWIR2_THRESHOLD)
    # eq. 8: NaN out everything that is not clear-sky water or is masked,
    # then take the percentile over the remaining values only
    water_temp = self.tirs1.copy()
    water_temp[~clear_sky_water] = np.nan
    water_temp[~self.mask] = np.nan
    return np.nanpercentile(water_temp, 82.5)
Equation 7 and 8 (Zhu and Woodcock, 2012)
Parameters
----------
is_water: ndarray, boolean
water mask, water is True, land is False
swir2: ndarray
tirs1: ndarray
Output
------
float:
82.5th percentile temperature over water |
def _keynat(string):
"""A natural sort helper function for sort() and sorted()
without using regular expression.
"""
r = []
for c in string:
if c.isdigit():
if r and isinstance(r[-1], int):
r[-1] = r[-1] * 10 + int(c)
else:
r.append(int(c))
else:
r.append(9 + ord(c))
return r | A natural sort helper function for sort() and sorted()
without using regular expression. |
def delete(self, ids):
    """
    Delete ipv4 records identified by their ids.

    :param ids: Identifiers of ipv4's
    :return: None
    """
    # expand the ids into the bulk-delete URI, then delegate to the base class
    uri = build_uri_with_ids('api/v3/ipv4/%s/', ids)
    return super(ApiIPv4, self).delete(uri)
:param ids: Identifiers of ipv4's
:return: None |
def cli(yamlfile, directory, out, classname, format):
    """Generate graphviz representations of the biolink model."""
    generator = DotGenerator(yamlfile, format)
    generator.serialize(classname=classname, dirname=directory, filename=out)
def MotionBlur(k=5, angle=(0, 360), direction=(-1.0, 1.0), order=1, name=None, deterministic=False, random_state=None):
    """
    Augmenter that blurs images by simulating motion blur, i.e. by convolving
    with a line kernel of size `k` rotated by `angle`.
    dtype support::
        See ``imgaug.augmenters.convolutional.Convolve``.
    Parameters
    ----------
    k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Kernel size to use.
          * If a single int, then that value will be used for the height
            and width of the kernel.
          * If a tuple of two ints ``(a, b)``, then the kernel size will be
            sampled from the interval ``[a..b]``.
          * If a list, then a random value will be sampled from that list per image.
          * If a StochasticParameter, then ``N`` samples will be drawn from
            that parameter per ``N`` input images, each representing the kernel
            size for the nth image.
    angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Angle of the motion blur in degrees (clockwise, relative to top center direction).
          * If a number, exactly that value will be used.
          * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
            be sampled per image.
          * If a list, then a random value will be sampled from that list per image.
          * If a StochasticParameter, a value will be sampled from the
            parameter per image.
    direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Forward/backward direction of the motion blur. Lower values towards -1.0 will point the motion blur towards
        the back (with angle provided via `angle`). Higher values towards 1.0 will point the motion blur forward.
        A value of 0.0 leads to a uniformly (but still angled) motion blur.
          * If a number, exactly that value will be used.
          * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
            be sampled per image.
          * If a list, then a random value will be sampled from that list per image.
          * If a StochasticParameter, a value will be sampled from the
            parameter per image.
    order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
        Interpolation order to use when rotating the kernel according to `angle`.
        See :func:`imgaug.augmenters.geometric.Affine.__init__`.
        Recommended to be ``0`` or ``1``, with ``0`` being faster, but less continuous/smooth as `angle` is changed,
        particularly around multiple of 45 degrees.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    Examples
    --------
    >>> aug = iaa.MotionBlur(k=15)
    Create a motion blur augmenter with kernel size of 15x15.
    >>> aug = iaa.MotionBlur(k=15, angle=[-45, 45])
    Create a motion blur augmenter with kernel size of 15x15 and a blur angle of either -45 or 45 degrees (randomly
    picked per image).
    """
    # TODO allow (1, None) and set to identity matrix if k == 1
    k_param = iap.handle_discrete_param(k, "k", value_range=(3, None), tuple_to_uniform=True, list_to_choice=True,
                                        allow_floats=False)
    angle_param = iap.handle_continuous_param(angle, "angle", value_range=None, tuple_to_uniform=True,
                                              list_to_choice=True)
    direction_param = iap.handle_continuous_param(direction, "direction", value_range=(-1.0-1e-6, 1.0+1e-6),
                                                  tuple_to_uniform=True, list_to_choice=True)
    def create_matrices(image, nb_channels, random_state_func):
        # avoid cyclic import between blur and geometric
        from . import geometric as iaa_geometric
        # force discrete for k_sample via int() in case of stochastic parameter
        k_sample = int(k_param.draw_sample(random_state=random_state_func))
        angle_sample = angle_param.draw_sample(random_state=random_state_func)
        direction_sample = direction_param.draw_sample(random_state=random_state_func)
        # force odd kernel size so the kernel has a center column
        k_sample = k_sample if k_sample % 2 != 0 else k_sample + 1
        # map direction from [-1.0, 1.0] to [0.0, 1.0] for the linspace below
        direction_sample = np.clip(direction_sample, -1.0, 1.0)
        direction_sample = (direction_sample + 1.0) / 2.0
        # vertical line through the kernel center, weighted linearly from
        # direction_sample at the top to (1 - direction_sample) at the bottom
        matrix = np.zeros((k_sample, k_sample), dtype=np.float32)
        matrix[:, k_sample//2] = np.linspace(float(direction_sample), 1.0 - float(direction_sample), num=k_sample)
        # rotate the line kernel to the sampled angle (done as a uint8 image
        # rotation, then scaled back to float)
        rot = iaa_geometric.Affine(rotate=angle_sample, order=order)
        matrix = (rot.augment_image((matrix * 255).astype(np.uint8)) / 255.0).astype(np.float32)
        # normalize so the kernel sums to 1 (preserves overall brightness)
        return [matrix/np.sum(matrix)] * nb_channels
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return iaa_convolutional.Convolve(create_matrices, name=name, deterministic=deterministic,
                                      random_state=random_state) | Augmenter that sharpens images and overlays the result with the original image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Kernel size to use.
* If a single int, then that value will be used for the height
and width of the kernel.
* If a tuple of two ints ``(a, b)``, then the kernel size will be
sampled from the interval ``[a..b]``.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then ``N`` samples will be drawn from
that parameter per ``N`` input images, each representing the kernel
size for the nth image.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of the motion blur in degrees (clockwise, relative to top center direction).
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Forward/backward direction of the motion blur. Lower values towards -1.0 will point the motion blur towards
the back (with angle provided via `angle`). Higher values towards 1.0 will point the motion blur forward.
A value of 0.0 leads to a uniformly (but still angled) motion blur.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use when rotating the kernel according to `angle`.
See :func:`imgaug.augmenters.geometric.Affine.__init__`.
Recommended to be ``0`` or ``1``, with ``0`` being faster, but less continuous/smooth as `angle` is changed,
particularly around multiple of 45 degrees.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.MotionBlur(k=15)
Create a motion blur augmenter with kernel size of 15x15.
>>> aug = iaa.MotionBlur(k=15, angle=[-45, 45])
Create a motion blur augmenter with kernel size of 15x15 and a blur angle of either -45 or 45 degrees (randomly
picked per image). |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.