def timeseries_reactive(self):
"""
Reactive power time series in kvar.
Parameters
----------
timeseries_reactive : :pandas:`pandas.Series<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing the reactive power time series in kvar. If it is
not set, an attempt is made to retrieve it from the
`load_reactive_power` attribute of the global TimeSeries object. If
that is not possible, None is returned.
"""
if self._timeseries_reactive is None:
# if normalized reactive power time series are given, they are
# scaled by the annual consumption; if none are given, reactive
# power time series are calculated in the timeseries getter using
# a given power factor
if self.grid.network.timeseries.load_reactive_power is not None:
self.power_factor = 'not_applicable'
self.reactive_power_mode = 'not_applicable'
ts_total = None
for sector in self.consumption.keys():
consumption = self.consumption[sector]
try:
ts = self.grid.network.timeseries.load_reactive_power[
sector].to_frame('q')
except KeyError:
logger.exception(
"No timeseries for load of type {} "
"given.".format(sector))
raise
ts = ts * consumption
if ts_total is None:
ts_total = ts
else:
ts_total.q += ts.q
return ts_total
else:
return None
else:
return self._timeseries_reactive
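# A minimal standalone pandas sketch (not from the source above) of the
# aggregation this getter performs: normalized per-sector reactive power
# profiles are scaled by annual consumption and summed. Sector names and
# numbers are invented for illustration.
import pandas as pd

load_reactive_power = pd.DataFrame({
    'residential': [0.10, 0.20, 0.15],   # normalized profiles
    'industrial':  [0.30, 0.30, 0.25],
})
consumption = {'residential': 1000.0, 'industrial': 5000.0}   # annual consumption

ts_total = None
for sector, annual in consumption.items():
    ts = load_reactive_power[sector].to_frame('q') * annual
    ts_total = ts if ts_total is None else ts_total.assign(q=ts_total.q + ts.q)
print(ts_total)   # summed reactive power time series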
def usearch61_smallmem_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
rev=False,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="smallmem_clustered.log",
sizeout=False,
consout_filepath=None):
""" Performs usearch61 de novo clustering via cluster_smallmem option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking.
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_smallmem': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True
}
if sizeorder:
params['--sizeorder'] = True
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
if sizeout:
params['--sizeout'] = True
if consout_filepath:
params['--consout'] = consout_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result
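# Hedged call sketch for the wrapper above: the file paths are placeholders,
# and the usearch61 binary must be installed for the application controller
# to actually run.
uc_fp, app_result = usearch61_smallmem_cluster(
    'seqs_sorted.fna',
    percent_id=0.97,
    output_dir='usearch_out',
    output_uc_filepath='usearch_out/clusters.uc',
    consout_filepath='usearch_out/consensus.fna',
)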
def display(self, image):
"""
Takes a 32-bit RGBA :py:mod:`PIL.Image` and dumps it to the daisy-chained
APA102 neopixels. If a pixel is not fully opaque, the alpha channel
value is used to set the brightness of the respective RGB LED.
"""
assert(image.mode == self.mode)
assert(image.size == self.size)
self._last_image = image.copy()
# Send zeros to reset, then pixel values then zeros at end
sz = image.width * image.height * 4
buf = bytearray(sz * 3)
m = self._mapping
for idx, (r, g, b, a) in enumerate(image.getdata()):
offset = sz + m[idx] * 4
brightness = (a >> 4) if a != 0xFF else self._brightness
buf[offset] = (0xE0 | brightness)
buf[offset + 1] = b
buf[offset + 2] = g
buf[offset + 3] = r
self._serial_interface.data(list(buf))
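# Hedged usage sketch: build an RGBA frame for an (assumed) 8x1 APA102 chain.
# Per the code above, the alpha channel doubles as per-pixel brightness:
# 0xFF means "use the device's global brightness", anything else is scaled
# down via a >> 4. `device` stands in for an apa102 device instance.
from PIL import Image

frame = Image.new("RGBA", (8, 1), (0, 0, 0, 0xFF))
frame.putpixel((0, 0), (255, 0, 0, 0x7F))   # dimmed red on the first LED
frame.putpixel((7, 0), (0, 0, 255, 0xFF))   # full-brightness blue on the last
# device.display(frame)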
def dfa_word_acceptance(dfa: dict, word: list) -> bool:
""" Checks if a given **word** is accepted by a DFA,
returning True/false.
The word w is accepted by a DFA if DFA has an accepting run
on w. Since A is deterministic,
:math:`w ∈ L(A)` if and only if :math:`ρ(s_0 , w) ∈ F` .
:param dict dfa: input DFA;
:param list word: list of actions ∈ dfa['alphabet'].
:return: *(bool)*, True if the word is accepted, False in the
other case.
"""
current_state = dfa['initial_state']
for action in word:
if (current_state, action) in dfa['transitions']:
current_state = dfa['transitions'][current_state, action]
else:
return False
if current_state in dfa['accepting_states']:
return True
else:
return False
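# Usage example: a two-state DFA over {'a', 'b'} that accepts exactly the
# words ending in 'b'.
dfa = {
    'alphabet': {'a', 'b'},
    'states': {'s0', 's1'},
    'initial_state': 's0',
    'accepting_states': {'s1'},
    'transitions': {
        ('s0', 'a'): 's0', ('s0', 'b'): 's1',
        ('s1', 'a'): 's0', ('s1', 'b'): 's1',
    },
}
print(dfa_word_acceptance(dfa, ['a', 'b']))   # True
print(dfa_word_acceptance(dfa, ['b', 'a']))   # False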
def start_request(self, headers, *, end_stream=False):
"""
Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
Completion of this call does not mean the request has been delivered -
the data has merely been stored in a buffer to be sent; there is no
guarantee it is actually delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without a body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication.
"""
yield from _wait_for_events(self._resumed, self._stream_creatable)
stream_id = self._conn.get_next_available_stream_id()
self._priority.insert_stream(stream_id)
self._priority.block(stream_id)
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
return stream_id
def check_database_connected(db):
"""
A built-in check to see if connecting to the configured default
database backend succeeds.
It's automatically added to the list of Dockerflow checks if a
:class:`~flask_sqlalchemy.SQLAlchemy` object is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
dockerflow = Dockerflow(app, db=db)
"""
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
errors = []
try:
with db.engine.connect() as connection:
connection.execute('SELECT 1;')
except DBAPIError as e:
msg = 'DB-API error: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
except SQLAlchemyError as e:
msg = 'Database misconfigured: "{!s}"'.format(e)
errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
return errors
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def _set_cpu_queue_info_state(self, v, load=False):
"""
Setter method for cpu_queue_info_state, mapped from YANG variable /cpu_queue_info_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpu_queue_info_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpu_queue_info_state() directly.
YANG Description: QoS CPU Queue info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cpu_queue_info_state.cpu_queue_info_state, is_container='container', presence=False, yang_name="cpu-queue-info-state", rest_name="cpu-queue-info-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-cpu-queue-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpu_queue_info_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cpu_queue_info_state.cpu_queue_info_state, is_container='container', presence=False, yang_name="cpu-queue-info-state", rest_name="cpu-queue-info-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-cpu-queue-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)""",
})
self.__cpu_queue_info_state = t
if hasattr(self, '_set'):
self._set()
def get_data(self):
"""Returns data from each field."""
result = {}
for field in self.fields:
result[field.name] = self.data.get(field.name)
return result
def button(self):
"""The button that triggered this event.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
int: The button triggering this event.
"""
if self.type != EventType.TABLET_TOOL_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_tool_get_button(
self._handle)
def list_nodes_full(call=None):
'''
List nodes, with all available information
CLI Example:
.. code-block:: bash
salt-cloud -F
'''
response = _query('grid', 'server/list')
ret = {}
for item in response['list']:
name = item['name']
ret[name] = item
ret[name]['image_info'] = item['image']
ret[name]['image'] = item['image']['friendlyName']
ret[name]['size'] = item['ram']['name']
ret[name]['public_ips'] = [item['ip']['ip']]
ret[name]['private_ips'] = []
ret[name]['state_info'] = item['state']
if 'active' in item['state']['description']:
ret[name]['state'] = 'RUNNING'
return ret
def _finalCleanup(self):
"""
Clean up all of our connections by issuing application-level close and
stop notifications, sending hail-mary final FIN packets (which may not
reach the other end, but nevertheless can be useful) when possible.
"""
for conn in self._connections.values():
conn.releaseConnectionResources()
assert not self._connections
def gps_message_arrived(self, m):
'''adjust time base from GPS message'''
# msec-style GPS message?
gps_week = getattr(m, 'Week', None)
gps_timems = getattr(m, 'TimeMS', None)
if gps_week is None:
# usec-style GPS message?
gps_week = getattr(m, 'GWk', None)
gps_timems = getattr(m, 'GMS', None)
if gps_week is None:
if getattr(m, 'GPSTime', None) is not None:
# PX4-style timestamp; we've only been called
# because we were speculatively created in case no
# better clock was found.
return
t = self._gpsTimeToTime(gps_week, gps_timems)
deltat = t - self.timebase
if deltat <= 0:
return
for type in self.counts_since_gps:
rate = self.counts_since_gps[type] / deltat
if rate > self.msg_rate.get(type, 0):
self.msg_rate[type] = rate
self.msg_rate['IMU'] = 50.0
self.timebase = t
self.counts_since_gps = {}
def move_identity(session, identity, uidentity):
"""Move an identity to a unique identity.
Shifts `identity` to the unique identity given in
`uidentity`. The function returns whether the operation
was executed successfully.
When `uidentity` is the unique identity currently related
to `identity`, this operation does not have any effect and
`False` will be returned as result.
:param session: database session
:param identity: identity to be moved
:param uidentity: unique identity where `identity` will be moved
:return: `True` if the identity was moved; `False` in any other
case
"""
if identity.uuid == uidentity.uuid:
return False
old_uidentity = identity.uidentity
identity.uidentity = uidentity
last_modified = datetime.datetime.utcnow()
old_uidentity.last_modified = last_modified
uidentity.last_modified = last_modified
identity.last_modified = last_modified
session.add(uidentity)
session.add(old_uidentity)
return True
def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method and returns the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
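# Hedged sketch of how such a factory is typically wired into a class; the
# class, its _inquire() result object, and the 'lifetime' attribute are
# invented purely for illustration.
class _FakeInquireResult:
    lifetime = 3600

class _FakeContext:
    _started = True
    def _inquire(self, **kwargs):
        return _FakeInquireResult()
    lifetime = inquire_property('lifetime', doc="remaining context lifetime")

print(_FakeContext().lifetime)   # -> 3600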
def reverse_transform(self, col):
"""Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
output = pd.DataFrame(index=col.index)
output[self.col_name] = col.apply(self.safe_round, axis=1)
if self.subtype == 'int':
output[self.col_name] = output[self.col_name].astype(int)
return output
def delete(name):
'''Delete the given virtual folder. This operation is irreversible!
NAME: Name of a virtual folder.
'''
with Session() as session:
try:
session.VFolder(name).delete()
print_done('Deleted.')
except Exception as e:
print_error(e)
sys.exit(1)
def autoconf(self):
"""Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
"""
serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
return (serverInfo is not None)
def stream_events(signals: Sequence[Signal], filter: Callable[[T_Event], bool] = None, *,
max_queue_size: int = 0) -> AsyncIterator[T_Event]:
"""
Return an async generator that yields events from the given signals.
Only events that pass the filter callable (if one has been given) are returned.
If no filter function was given, all events are yielded from the generator.
:param signals: the signals to get events from
:param filter: a callable that takes an event object as an argument and returns ``True`` if
the event should pass, ``False`` if not
:param max_queue_size: maximum size of the queue, after which it will start to drop events
"""
@async_generator
async def streamer():
try:
while True:
event = await queue.get()
if filter is None or filter(event):
await yield_(event)
finally:
cleanup()
def cleanup():
nonlocal queue
if queue is not None:
for signal in signals:
signal.disconnect(queue.put_nowait)
queue = None
assert check_argument_types()
queue = Queue(max_queue_size) # type: Queue[T_Event]
for signal in signals:
signal.connect(queue.put_nowait)
gen = [streamer()] # this is to allow the reference count to drop to 0
weakref.finalize(gen[0], cleanup)
return gen.pop()
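# Hedged consumption sketch: `some_signal` stands in for any Signal instance
# from the surrounding framework; only events whose (assumed) `value`
# attribute exceeds 10 are yielded.
async def consume(some_signal):
    async for event in stream_events([some_signal], lambda e: e.value > 10):
        print(event)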
def from_array(filename, data, iline=189,
xline=193,
format=SegySampleFormat.IBM_FLOAT_4_BYTE,
dt=4000,
delrt=0):
""" Create a new SEGY file from an n-dimentional array. Create a structured
SEGY file with defaulted headers from a 2-, 3- or 4-dimensional array.
ilines, xlines, offsets and samples are inferred from the size of the
array. Please refer to the documentation for functions from_array2D,
from_array3D and from_array4D to see how the arrays are interpreted.
Structure-defining fields in the binary header and in the traceheaders are
set accordingly. Such fields include, but are not limited to iline, xline
and offset. The file also contains a defaulted textual header.
Parameters
----------
filename : string-like
Path to new file
data : 2-,3- or 4-dimensional array-like
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
format : int or segyio.SegySampleFormat
Sample format field in the trace header. Defaults to IBM float 4 byte
dt : int-like
sample interval
delrt : int-like
delay recording time
Notes
-----
.. versionadded:: 1.8
Examples
--------
Create a file from a 3D array, open it and read an iline:
>>> segyio.tools.from_array(path, array3d)
>>> with segyio.open(path, mode) as f:
... iline = f.iline[0]
...
"""
dt = int(dt)
delrt = int(delrt)
data = np.asarray(data)
dimensions = len(data.shape)
if dimensions not in range(2, 5):
problem = "Expected 2, 3, or 4 dimensions, {} was given".format(dimensions)
raise ValueError(problem)
spec = segyio.spec()
spec.iline = iline
spec.xline = xline
spec.format = format
spec.sorting = TraceSortingFormat.INLINE_SORTING
if dimensions == 2:
spec.ilines = [1]
spec.xlines = list(range(1, np.size(data,0) + 1))
spec.samples = list(range(np.size(data,1)))
spec.tracecount = np.size(data, 1)
if dimensions == 3:
spec.ilines = list(range(1, np.size(data, 0) + 1))
spec.xlines = list(range(1, np.size(data, 1) + 1))
spec.samples = list(range(np.size(data, 2)))
if dimensions == 4:
spec.ilines = list(range(1, np.size(data, 0) + 1))
spec.xlines = list(range(1, np.size(data, 1) + 1))
spec.offsets = list(range(1, np.size(data, 2)+ 1))
spec.samples = list(range(np.size(data,3)))
samplecount = len(spec.samples)
with segyio.create(filename, spec) as f:
tr = 0
for ilno, il in enumerate(spec.ilines):
for xlno, xl in enumerate(spec.xlines):
for offno, off in enumerate(spec.offsets):
f.header[tr] = {
segyio.su.tracf : tr,
segyio.su.cdpt : tr,
segyio.su.offset : off,
segyio.su.ns : samplecount,
segyio.su.dt : dt,
segyio.su.delrt : delrt,
segyio.su.iline : il,
segyio.su.xline : xl
}
if dimensions == 2: f.trace[tr] = data[tr, :]
if dimensions == 3: f.trace[tr] = data[ilno, xlno, :]
if dimensions == 4: f.trace[tr] = data[ilno, xlno, offno, :]
tr += 1
f.bin.update(
tsort=TraceSortingFormat.INLINE_SORTING,
hdt=dt,
dto=dt
)
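# Hedged round-trip sketch based on the docstring example above; requires
# segyio and numpy, and writes to a path of your choosing.
import numpy as np
import segyio

data = np.random.rand(5, 10, 20).astype(np.float32)   # ilines x xlines x samples
segyio.tools.from_array('example.sgy', data)
with segyio.open('example.sgy') as f:
    print(len(f.ilines), len(f.xlines), len(f.samples))   # 5 10 20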
def send(msg_type, send_async=False, *args, **kwargs):
"""
Constructs a message class and sends the message.
Defaults to sending synchronously. Set send_async=True to send
asynchronously.
Args:
:msg_type: (str) the type of message to send, i.e. 'Email'
:send_async: (bool) default is False, set True to send asynchronously.
:kwargs: (dict) keyword arguments that are required for the
various message types. See docstrings for each type.
i.e. help(messages.Email), help(messages.Twilio), etc.
Example:
>>> kwargs = {
...     'from_': 'me@here.com',
...     'to': 'you@there.com',
...     'auth': 'yourPassword',
...     'subject': 'Email Subject',
...     'body': 'Your message to send',
...     'attachments': ['filepath1', 'filepath2'],
... }
>>> messages.send('email', **kwargs)
Message sent...
"""
message = message_factory(msg_type, *args, **kwargs)
try:
if send_async:
message.send_async()
else:
message.send()
except MessageSendError as e:
err_exit("Unable to send message: ", e)
def shift_coordinate_grid(self, x_shift, y_shift, pixel_unit=False):
"""
Shifts the coordinate system.
:param x_shift: shift in x (or RA)
:param y_shift: shift in y (or DEC)
:param pixel_unit: bool, if True, units of pixels in input, otherwise RA/DEC
:return: updated data class with change in coordinate system
"""
if pixel_unit is True:
ra_shift, dec_shift = self.map_pix2coord(x_shift, y_shift)
else:
ra_shift, dec_shift = x_shift, y_shift
self._ra_at_xy_0 += ra_shift
self._dec_at_xy_0 += dec_shift
self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix(-self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0,
self._Ma2pix)
def triangulize(image, tile_size):
"""Processes the given image by breaking it down into tiles of the given
size and applying a triangular effect to each tile. Returns the processed
image as a PIL Image object.
The image can be given as anything suitable for passing to `Image.open`
(ie, the path to an image or as a file-like object containing image data).
If tile_size is 0, the tile size will be guessed based on the image
size. It will also be adjusted to be divisible by 2 if it is not already.
"""
if isinstance(image, basestring) or hasattr(image, 'read'):
image = Image.open(image)
assert isinstance(tile_size, int)
# Make sure we have a usable tile size, by guessing based on image size
# and making sure it's a multiple of two.
if tile_size == 0:
tile_size = guess_tile_size(image)
if tile_size % 2 != 0:
tile_size = (tile_size / 2) * 2
logging.info('Input image size: %r', image.size)
logging.info('Tile size: %r', tile_size)
# Preprocess image to make sure it's at a size we can handle
image = prep_image(image, tile_size)
logging.info('Prepped image size: %r', image.size)
# Get pixmap (for direct pixel access) and draw objects for the image.
pix = image.load()
draw = ImageDraw.Draw(image)
# Process the image, tile by tile
for x, y in iter_tiles(image, tile_size):
process_tile(x, y, tile_size, pix, draw, image)
return image
def search(self, index_name, query):
"""Search the given index_name with the given ELS query.
Args:
index_name: Name of the Index
query: The string to be searched.
Returns:
List of results.
Raises:
RuntimeError: When the search query fails.
"""
try:
results = self.els_search.search(index=index_name, body=query)
return results
except Exception, error:
error_str = 'Query failed: %s\n' % str(error)
error_str += '\nIs there a dynamic script in the query?, see www.elasticsearch.org'
print error_str
raise RuntimeError(error_str)
def searchForGroups(self, name, limit=10):
"""
Find and get group thread by its name
:param name: Name of the group thread
:param limit: The maximum number of groups to fetch
:return: :class:`models.Group` objects, ordered by relevance
:rtype: list
:raises: FBchatException if request failed
"""
params = {"search": name, "limit": limit}
j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_GROUP, params=params))
return [Group._from_graphql(node) for node in j["viewer"]["groups"]["nodes"]]
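# Hedged usage sketch with the fbchat client this method belongs to; the
# credentials are placeholders.
from fbchat import Client

client = Client('<email>', '<password>')
for group in client.searchForGroups('book club', limit=5):
    print(group.name)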
def avail_platforms():
'''
Return which platforms are available
CLI Example:
.. code-block:: bash
salt myminion genesis.avail_platforms
'''
ret = {}
for platform in CMD_MAP:
ret[platform] = True
for cmd in CMD_MAP[platform]:
if not salt.utils.path.which(cmd):
ret[platform] = False
return ret
def add_intercept_term(self, x):
"""
Adds a column of ones to estimate the intercept term for
separation boundary
"""
nr_x,nr_f = x.shape
intercept = np.ones([nr_x,1])
x = np.hstack((intercept,x))
return x
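# Quick illustration of the effect (the method body only uses numpy, so the
# same operation works standalone): a (4, 2) feature matrix gains a leading
# column of ones and becomes (4, 3).
import numpy as np

x = np.arange(8.0).reshape(4, 2)
x = np.hstack((np.ones([4, 1]), x))
print(x.shape)   # (4, 3)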
def create_collection(self, name, codec_options=None,
read_preference=None, write_concern=None,
read_concern=None, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this method. Supported
options vary with MongoDB release. Some examples include:
- "size": desired initial size for the collection (in
bytes). For capped collections this size is the max
size of the collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `name`: the name of the collection to create
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Database` is
used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.4
Added the collation option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
.. versionchanged:: 2.2
Removed deprecated argument: options
"""
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, True, codec_options,
read_preference, write_concern,
read_concern, **kwargs)
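# Hedged usage sketch with a standard pymongo client; the server address and
# the capped-collection options are illustrative.
from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017')['testdb']
events = db.create_collection('events', capped=True, size=2 ** 20, max=1000)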
def walk_dir(path, args, state):
"""
Check all files in `path' to see if there are any requests that
we should send out on the bus.
"""
if args.debug:
sys.stderr.write("Walking %s\n" % path)
for root, _dirs, files in os.walk(path):
if not safe_process_files(root, files, args, state):
return False
if state.should_quit():
return False
return True
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
:return: dictionary that is BSON-encodable
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor}
def add_model(self, *args, **kwargs):
# type: (*Any, **Any) -> Part
"""Add a new child model to this model.
To prevent the backend from updating the frontend you may add `suppress_kevents=True` as an
additional keyword argument to this method. This improves backend performance, with the
trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL`
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_model(self, *args, **kwargs)
def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
for ii, param in enumerate(re.split(r";\s*", ns_header)):
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
# This is an RFC 2109 cookie.
v = strip_quotes(v)
version_set = True
if k == "expires":
# convert expires date to seconds since epoch
v = http2time(strip_quotes(v)) # None if invalid
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
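# Quick illustration on a typical Netscape-style Set-Cookie value (this
# mirrors http.cookiejar.parse_ns_headers from the standard library):
print(parse_ns_headers(['foo=bar; path=/; secure']))
# -> [[('foo', 'bar'), ('path', '/'), ('secure', None), ('version', '0')]]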
def rearrange_jupytext_metadata(metadata):
"""Convert the jupytext_formats metadata entry to jupytext/formats, etc. See #91"""
# Backward compatibility with nbrmd
for key in ['nbrmd_formats', 'nbrmd_format_version']:
if key in metadata:
metadata[key.replace('nbrmd', 'jupytext')] = metadata.pop(key)
jupytext_metadata = metadata.pop('jupytext', {})
if 'jupytext_formats' in metadata:
jupytext_metadata['formats'] = metadata.pop('jupytext_formats')
if 'jupytext_format_version' in metadata:
jupytext_metadata['text_representation'] = {'format_version': metadata.pop('jupytext_format_version')}
if 'main_language' in metadata:
jupytext_metadata['main_language'] = metadata.pop('main_language')
for entry in ['encoding', 'executable']:
if entry in metadata:
jupytext_metadata[entry] = metadata.pop(entry)
filters = jupytext_metadata.pop('metadata_filter', {})
if 'notebook' in filters:
jupytext_metadata['notebook_metadata_filter'] = filters['notebook']
if 'cells' in filters:
jupytext_metadata['cell_metadata_filter'] = filters['cells']
for filter_level in ['notebook_metadata_filter', 'cell_metadata_filter']:
if filter_level in jupytext_metadata:
jupytext_metadata[filter_level] = metadata_filter_as_string(jupytext_metadata[filter_level])
if jupytext_metadata.get('text_representation', {}).get('jupytext_version', '').startswith('0.'):
formats = jupytext_metadata.get('formats')
if formats:
jupytext_metadata['formats'] = ','.join(['.' + fmt if fmt.rfind('.') > 0 else fmt
for fmt in formats.split(',')])
# auto to actual extension
formats = jupytext_metadata.get('formats')
if formats:
jupytext_metadata['formats'] = short_form_multiple_formats(long_form_multiple_formats(formats, metadata))
if jupytext_metadata:
metadata['jupytext'] = jupytext_metadata
def _lookup_identity_names(self):
"""
Batch resolve identities to usernames.
Returns a dict mapping IDs to Usernames
"""
id_batch_size = 100
# fetch in batches of 100, store in a dict
ac = get_auth_client()
self._resolved_map = {}
for i in range(0, len(self.identity_ids), id_batch_size):
chunk = self.identity_ids[i : i + id_batch_size]
resolved_result = ac.get_identities(ids=chunk)
for x in resolved_result["identities"]:
self._resolved_map[x["id"]] = x["username"]
def remote_tags(url):
# type: (str) -> list
"""
List all available remote tags naturally sorted as version strings
:rtype: list
:param url: Remote URL of the repository
:return: list of available tags
"""
tags = []
remote_git = Git()
for line in remote_git.ls_remote('--tags', '--quiet', url).split('\n'):
hash_ref = line.split('\t')
tags.append(hash_ref[1][10:].replace('^{}',''))
return natsorted(tags)
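# Hedged call sketch; requires GitPython, natsort and network access, and
# the URL is just an example public repository.
print(remote_tags('https://github.com/git/git.git')[-5:])   # five newest tags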
def add_size_info (self):
"""Set size of URL content (if any)..
Should be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE)
def start(self):
"""Start scheduling"""
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next)
def should_filter(items):
"""Check if we should do damage filtering on somatic calling with low frequency events.
"""
return (vcfutils.get_paired(items) is not None and
any("damage_filter" in dd.get_tools_on(d) for d in items))
def _update_digital_forms(self, **update_props):
"""
Update operation for ISO Digital Forms metadata
:see: gis_metadata.utils._complex_definitions[DIGITAL_FORMS]
"""
digital_forms = wrap_value(update_props['values'])
# Update all Digital Form properties: distributionFormat*
xpath_map = self._data_structures[update_props['prop']]
dist_format_props = ('name', 'decompression', 'version', 'specification')
dist_format_xroot = self._data_map['_digital_forms_root']
dist_format_xmap = {prop: xpath_map[prop] for prop in dist_format_props}
dist_formats = []
for digital_form in digital_forms:
dist_format = {prop: digital_form[prop] for prop in dist_format_props}
if digital_form.get('content'):
dist_spec = wrap_value(digital_form.get('specification'))
dist_spec.append(_DIGITAL_FORMS_CONTENT_DELIM)
dist_spec.extend(wrap_value(digital_form['content']))
dist_format['specification'] = dist_spec
dist_formats.append(dist_format)
update_props['values'] = dist_formats
dist_formats = update_complex_list(
xpath_root=dist_format_xroot, xpath_map=dist_format_xmap, **update_props
)
# Update all Network Resources: transferOptions+
trans_option_props = ('access_desc', 'access_instrs', 'network_resource')
trans_option_xroot = self._data_map['_transfer_options_root']
trans_option_xmap = {prop: self._data_map['_' + prop] for prop in trans_option_props}
trans_options = []
for digital_form in digital_forms:
trans_options.append({prop: digital_form[prop] for prop in trans_option_props})
update_props['values'] = trans_options
trans_options = update_complex_list(
xpath_root=trans_option_xroot, xpath_map=trans_option_xmap, **update_props
)
return {
'distribution_formats': dist_formats,
'transfer_options': trans_options
}
def compute_K_numerical(dataframe, settings=None, keep_dir=None):
"""Use a finite-element modeling code to infer geometric factors for meshes
with topography or irregular electrode spacings.
Parameters
----------
dataframe : pandas.DataFrame
the data frame that contains the data
settings : dict
The settings required to compute the geometric factors. See examples
down below for more information in the required content.
keep_dir : path
if not None, copy modeling dir here
Returns
-------
K : :class:`numpy.ndarray`
K factors (are also directly written to the dataframe)
Examples
--------
::
settings = {
'rho': 100,
'elem': 'elem.dat',
'elec': 'elec.dat',
'sink_node': '100',
'2D': False,
}
"""
inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo')
if inversion_code == 'crtomo':
import reda.utils.geom_fac_crtomo as geom_fac_crtomo
if keep_dir is not None:
keep_dir = os.path.abspath(keep_dir)
K = geom_fac_crtomo.compute_K(
dataframe, settings, keep_dir)
else:
raise Exception(
'Inversion code {0} not implemented for K computation'.format(
inversion_code
))
return K
def pdf_extract_text(path, pdfbox_path, pwd='', timeout=120):
"""Utility to use PDFBox from pdfbox.apache.org to extract Text from a PDF
Parameters
----------
path : str
Path to source pdf-file
pdfbox_path : str
Path to pdfbox-app-x.y.z.jar
pwd : str, optional
Password for protected pdf files
timeout : int, optional
Seconds to wait for a result before raising an exception (defaults to 120).
Returns
-------
None
The extracted text is written to a file named after the source file with '.txt' appended.
Notes
-----
- Requires pdfbox-app-x.y.z.jar in a recent version (see http://pdfbox.apache.org).
- Requires Java (JDK) 1.5 or newer (see http://www.oracle.com/technetwork/java/javase/downloads/index.html).
- Requires java to be on the PATH.
"""
if not os.path.isfile(path):
raise IOError('path must be the location of the source pdf-file')
if not os.path.isfile(pdfbox_path):
raise IOError('pdfbox_path must be the location of the pdfbox.jar')
import subprocess
for p in os.environ['PATH'].split(':'):
if os.path.isfile(os.path.join(p, 'java')):
break
else:
print('java is not on the PATH')
return
try:
if pwd == '':
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', path, path+'.txt']
else:
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', '-password', pwd,
path, path+'.txt']
subprocess.check_call(cmd, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=timeout)
except subprocess.TimeoutExpired as e:
print('Timeout of {:.1f} min expired'.format(timeout/60))
except subprocess.CalledProcessError as e:
print('Text could not successfully be extracted.')
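# Hedged call sketch; both paths are placeholders, and a Java runtime plus
# the PDFBox app jar must be available locally.
pdf_extract_text('report.pdf', '/opt/pdfbox/pdfbox-app-2.0.29.jar')
# -> writes 'report.pdf.txt' next to the source file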
def asarray(self, out=None, squeeze=True, lock=None, reopen=True,
maxsize=None, maxworkers=None, validate=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
Parameters
----------
out : numpy.ndarray, str, or file-like object
Buffer where image data will be saved.
If None (default), a new array will be created.
If numpy.ndarray, a writable array of compatible dtype and shape.
If 'memmap', directly memory-map the image data in the TIFF file
if possible; else create a memory-mapped array in a temporary file.
If str or open file, the file name or file object used to
create a memory-map to an array stored in a binary file on disk.
squeeze : bool
If True (default), all length-1 dimensions (except X and Y) are
squeezed out from the array.
If False, the shape of the returned array might be different from
the page.shape.
lock : {RLock, NullContext}
A reentrant lock used to synchronize reads from file.
If None (default), the lock of the parent's filehandle is used.
reopen : bool
If True (default) and the parent file handle is closed, the file
is temporarily re-opened and closed if no exception occurs.
maxsize : int
Maximum size of data before a ValueError is raised.
Can be used to catch DOS. Default: 16 TB.
maxworkers : int or None
Maximum number of threads to concurrently decode tile data.
If None (default), up to half the CPU cores are used for
compressed tiles.
See remarks in TiffFile.asarray.
validate : bool
If True (default), validate various parameters.
If None, only validate parameters and return None.
Returns
-------
numpy.ndarray
Numpy array of decompressed, depredicted, and unpacked image data
read from Strip/Tile Offsets/ByteCounts, formatted according to
shape and dtype metadata found in tags and parameters.
Photometric conversion, pre-multiplied alpha, orientation, and
colorimetry corrections are not applied. Specifically, CMYK images
are not converted to RGB, MinIsWhite images are not inverted,
and color palettes are not applied.
"""
# properties from TiffPage or TiffFrame
fh = self.parent.filehandle
byteorder = self.parent.tiff.byteorder
offsets, bytecounts = self._offsetscounts
self_ = self
self = self.keyframe # self or keyframe
if not self._shape or product(self._shape) == 0:
return None
tags = self.tags
if validate or validate is None:
if maxsize is None:
maxsize = 2**44
if maxsize and product(self._shape) > maxsize:
raise ValueError('data are too large %s' % str(self._shape))
if self.dtype is None:
raise ValueError('data type not supported: %s%i' % (
self.sampleformat, self.bitspersample))
if self.compression not in TIFF.DECOMPESSORS:
raise ValueError(
'cannot decompress %s' % self.compression.name)
if 'SampleFormat' in tags:
tag = tags['SampleFormat']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError(
'sample formats do not match %s' % tag.value)
if self.is_subsampled and (self.compression not in (6, 7) or
self.planarconfig == 2):
raise NotImplementedError('chroma subsampling not supported')
if validate is None:
return None
lock = fh.lock if lock is None else lock
with lock:
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError('file handle is closed')
dtype = self._dtype
shape = self._shape
imagewidth = self.imagewidth
imagelength = self.imagelength
imagedepth = self.imagedepth
bitspersample = self.bitspersample
typecode = byteorder + dtype.char
lsb2msb = self.fillorder == 2
istiled = self.is_tiled
if istiled:
tilewidth = self.tilewidth
tilelength = self.tilelength
tiledepth = self.tiledepth
tw = (imagewidth + tilewidth - 1) // tilewidth
tl = (imagelength + tilelength - 1) // tilelength
td = (imagedepth + tiledepth - 1) // tiledepth
tiledshape = (td, tl, tw)
tileshape = (tiledepth, tilelength, tilewidth, shape[-1])
runlen = tilewidth
else:
runlen = imagewidth
if self.planarconfig == 1:
runlen *= self.samplesperpixel
if isinstance(out, str) and out == 'memmap' and self.is_memmappable:
# direct memory map array in file
with lock:
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
# read contiguous bytes to array
if out is not None:
out = create_output(out, shape, dtype)
with lock:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape), out=out)
if lsb2msb:
bitorder_decode(result, out=result)
else:
# decompress, unpack,... individual strips or tiles
result = create_output(out, shape, dtype)
decompress = TIFF.DECOMPESSORS[self.compression]
if self.compression in (6, 7): # COMPRESSION.JPEG
colorspace = None
outcolorspace = None
jpegtables = None
if lsb2msb:
log.warning('TiffPage.asarray: disabling LSB2MSB for JPEG')
lsb2msb = False
if 'JPEGTables' in tags:
# load JPEGTables from TiffFrame
jpegtables = self_._gettags({347}, lock=lock)[0][1].value
# TODO: obtain table from OJPEG tags
# elif ('JPEGInterchangeFormat' in tags and
# 'JPEGInterchangeFormatLength' in tags and
# tags['JPEGInterchangeFormat'].value != offsets[0]):
# fh.seek(tags['JPEGInterchangeFormat'].value)
# fh.read(tags['JPEGInterchangeFormatLength'].value)
if 'ExtraSamples' in tags:
pass
elif self.photometric == 6:
# YCBCR -> RGB
outcolorspace = 'RGB'
elif self.photometric == 2:
if self.planarconfig == 2:
# TODO: decode JPEG to planar RGB
raise NotImplementedError(
'cannot decode JPEG to planar RGB')
colorspace = outcolorspace = 'RGB'
else:
outcolorspace = TIFF.PHOTOMETRIC(self.photometric).name
if istiled:
heightwidth = tilelength, tilewidth
else:
heightwidth = imagelength, imagewidth
def decompress(data, bitspersample=bitspersample,
jpegtables=jpegtables, colorspace=colorspace,
outcolorspace=outcolorspace, shape=heightwidth,
out=None, _decompress=decompress):
return _decompress(data, bitspersample, jpegtables,
colorspace, outcolorspace, shape, out)
def unpack(data):
return data.reshape(-1)
elif bitspersample in (8, 16, 32, 64, 128):
if (bitspersample * runlen) % 8:
raise ValueError('data and sample size mismatch')
if self.predictor == 3: # PREDICTOR.FLOATINGPOINT
# the floating-point horizontal differencing decoder
# needs the raw byte order
typecode = dtype.char
def unpack(data, typecode=typecode, out=None):
try:
# read only numpy array
return numpy.frombuffer(data, typecode)
except ValueError:
# strips may be missing EOI
# log.warning('TiffPage.asarray: ...')
bps = bitspersample // 8
xlen = (len(data) // bps) * bps
return numpy.frombuffer(data[:xlen], typecode)
elif isinstance(bitspersample, tuple):
def unpack(data, out=None):
return unpack_rgb(data, typecode, bitspersample)
else:
def unpack(data, out=None):
return packints_decode(data, typecode, bitspersample,
runlen)
# TODO: store decode function for future use
# TODO: unify tile and strip decoding
if istiled:
unpredict = TIFF.UNPREDICTORS[self.predictor]
def decode(tile, tileindex):
return tile_decode(tile, tileindex, tileshape, tiledshape,
lsb2msb, decompress, unpack, unpredict,
result[0])
tileiter = buffered_read(fh, lock, offsets, bytecounts)
if maxworkers is None:
maxworkers = 0 if self.compression > 1 else 1
if maxworkers == 0:
import multiprocessing # noqa: delay import
maxworkers = multiprocessing.cpu_count() // 2
if maxworkers < 2:
for i, tile in enumerate(tileiter):
decode(tile, i)
else:
# decode first tile un-threaded to catch exceptions
decode(next(tileiter), 0)
with ThreadPoolExecutor(maxworkers) as executor:
executor.map(decode, tileiter, range(1, len(offsets)))
else:
stripsize = self.rowsperstrip * self.imagewidth
if self.planarconfig == 1:
stripsize *= self.samplesperpixel
outsize = stripsize * self.dtype.itemsize
result = result.reshape(-1)
index = 0
for strip in buffered_read(fh, lock, offsets, bytecounts):
if lsb2msb:
strip = bitorder_decode(strip, out=strip)
strip = decompress(strip, out=outsize)
strip = unpack(strip)
size = min(result.size, strip.size, stripsize,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor != 1 and not (istiled and not self.is_contiguous):
unpredict = TIFF.UNPREDICTORS[self.predictor]
result = unpredict(result, axis=-2, out=result)
if squeeze:
try:
result.shape = self.shape
except ValueError:
log.warning('TiffPage.asarray: failed to reshape %s to %s',
result.shape, self.shape)
if closed:
# TODO: file should remain open if an exception occurred above
fh.close()
return result
|
Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
Parameters
----------
out : numpy.ndarray, str, or file-like object
Buffer where image data will be saved.
If None (default), a new array will be created.
If numpy.ndarray, a writable array of compatible dtype and shape.
If 'memmap', directly memory-map the image data in the TIFF file
if possible; else create a memory-mapped array in a temporary file.
If str or open file, the file name or file object used to
create a memory-map to an array stored in a binary file on disk.
squeeze : bool
If True (default), all length-1 dimensions (except X and Y) are
squeezed out from the array.
If False, the shape of the returned array might be different from
the page.shape.
lock : {RLock, NullContext}
A reentrant lock used to synchronize reads from file.
If None (default), the lock of the parent's filehandle is used.
reopen : bool
If True (default) and the parent file handle is closed, the file
is temporarily re-opened and closed if no exception occurs.
maxsize: int
Maximum size of data before a ValueError is raised.
Can be used to catch DOS. Default: 16 TB.
maxworkers : int or None
Maximum number of threads to concurrently decode tile data.
If None (default), up to half the CPU cores are used for
compressed tiles.
See remarks in TiffFile.asarray.
validate : bool
If True (default), validate various parameters.
If None, only validate parameters and return None.
Returns
-------
numpy.ndarray
Numpy array of decompressed, depredicted, and unpacked image data
read from Strip/Tile Offsets/ByteCounts, formatted according to
shape and dtype metadata found in tags and parameters.
Photometric conversion, pre-multiplied alpha, orientation, and
colorimetry corrections are not applied. Specifically, CMYK images
are not converted to RGB, MinIsWhite images are not inverted,
and color palettes are not applied.
|
def commissionerUnregister(self):
"""stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner
"""
print '%s call commissionerUnregister' % self.port
cmd = 'commissioner stop'
print cmd
return self.__sendCommand(cmd)[0] == 'Done'
|
stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner
|
def _get(self, url,
param_dict={},
securityHandler=None,
additional_headers=[],
handlers=[],
proxy_url=None,
proxy_port=None,
compress=True,
custom_handlers=[],
out_folder=None,
file_name=None):
"""
Performs a GET operation
Inputs:
Output:
returns dictionary, string or None
"""
self._last_method = "GET"
CHUNK = 4056
param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
headers = [] + additional_headers
if compress:
headers.append(('Accept-encoding', 'gzip'))
else:
headers.append(('Accept-encoding', ''))
headers.append(('User-Agent', self.useragent))
if len(param_dict.keys()) == 0:
param_dict = None
if handlers is None:
handlers = []
if handler is not None:
handlers.append(handler)
handlers.append(RedirectHandler())
if cj is not None:
handlers.append(request.HTTPCookieProcessor(cj))
if proxy_url is not None:
if proxy_port is None:
proxy_port = 80
proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
"https":"https://%s:%s" % (proxy_url, proxy_port)}
proxy_support = request.ProxyHandler(proxies)
handlers.append(proxy_support)
opener = request.build_opener(*handlers)
opener.addheaders = headers
if param_dict is None:
resp = opener.open(url, data=param_dict)
elif len(str(urlencode(param_dict))) + len(url) >= 1999:
resp = opener.open(url, data=urlencode(param_dict))
else:
format_url = url + "?%s" % urlencode(param_dict)
resp = opener.open(fullurl=format_url)
self._last_code = resp.getcode()
self._last_url = resp.geturl()
# Get some headers from the response
maintype = self._mainType(resp)
contentDisposition = resp.headers.get('content-disposition')
contentEncoding = resp.headers.get('content-encoding')
contentType = resp.headers.get('content-Type').split(';')[0].lower()
contentLength = resp.headers.get('content-length')
if maintype.lower() in ('image',
'application/x-zip-compressed') or \
contentType == 'application/x-zip-compressed' or \
(contentDisposition is not None and \
contentDisposition.lower().find('attachment;') > -1):
fname = self._get_file_name(
contentDisposition=contentDisposition,
url=url)
if out_folder is None:
out_folder = tempfile.gettempdir()
if contentLength is not None:
max_length = int(contentLength)
if max_length < CHUNK:
CHUNK = max_length
file_name = os.path.join(out_folder, fname)
with open(file_name, 'wb') as writer:
for data in self._chunk(response=resp,
size=CHUNK):
writer.write(data)
writer.flush()
writer.flush()
del writer
return file_name
else:
read = ""
for data in self._chunk(response=resp,
size=CHUNK):
if self.PY3 == True:
read += data.decode('utf-8')
else:
read += data
del data
try:
results = json.loads(read)
if 'error' in results:
if 'message' in results['error']:
if results['error']['message'] == 'Request not made over ssl':
if url.startswith('http://'):
url = url.replace('http://', 'https://')
return self._get(url,
param_dict,
securityHandler,
additional_headers,
handlers,
proxy_url,
proxy_port,
compress,
custom_handlers,
out_folder,
file_name)
return results
except:
return read
|
Performs a GET operation
Inputs:
Output:
returns dictionary, string or None
|
def set_default_init_cli_cmds(self):
"""
Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands.
"""
init_cli_cmds = []
init_cli_cmds.append("set --retcode true")
init_cli_cmds.append("echo off")
init_cli_cmds.append("set --vt100 off")
#set dut name as variable
init_cli_cmds.append('set dut "'+self.name+'"')
init_cli_cmds.append(['set testcase "' + self.testcase + '"', True])
return init_cli_cmds
|
Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands.
|
def _check_callback(callback):
"""
Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback.
"""
# If the callback is a class, create an instance of it first
if inspect.isclass(callback):
callback_object = callback()
if not callable(callback_object):
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
elif callable(callback):
callback_object = callback
else:
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
return callback_object
|
Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback.
|
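A minimal usage sketch for `_check_callback` above; the `Printer` class and `log` function are illustrative names, not part of the original source, and the helper is repeated here only so the snippet runs on its own.
import inspect

def _check_callback(callback):
    # Same contract as above: classes are instantiated, other callables pass through.
    if inspect.isclass(callback):
        callback_object = callback()
        if not callable(callback_object):
            raise ValueError("Callback must be a class that implements __call__ or a function.")
    elif callable(callback):
        callback_object = callback
    else:
        raise ValueError("Callback must be a class that implements __call__ or a function.")
    return callback_object

class Printer:
    def __call__(self, message):
        print("got:", message)

def log(message):
    print("logged:", message)

_check_callback(Printer)("hello")  # instantiates Printer, then calls the instance
_check_callback(log)("hello")      # plain function is returned unchanged
|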
def loop(self, max_seconds=None):
"""
Main loop for the process. This will run continuously until maxiter
"""
loop_started = datetime.datetime.now()
self._is_running = True
while self._is_running:
self.process_error_queue(self.q_error)
if max_seconds is not None:
if (datetime.datetime.now() - loop_started).total_seconds() > max_seconds:
break
for subprocess in self._subprocesses:
if not subprocess.is_alive():
subprocess.start()
self.process_io_queue(self.q_stdout, sys.stdout)
self.process_io_queue(self.q_stderr, sys.stderr)
|
Main loop for the process. Runs continuously until `max_seconds` elapses (if given) or the loop is stopped.
|
def ReadVarString(self, max=sys.maxsize):
"""
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
"""
length = self.ReadVarInt(max)
return self.unpack(str(length) + 's', length)
|
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
|
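A self-contained sketch of the length-prefixed read that `ReadVarString` performs. The Bitcoin/NEO-style varint encoding (0xfd/0xfe/0xff prefixes) is an assumption here, and `read_var_int`/`read_var_string` are illustrative stand-ins for the class methods above.
import io
import struct

def read_var_int(stream):
    # Assumed varint encoding: values < 0xfd fit in one byte;
    # 0xfd/0xfe/0xff prefix little-endian 2-, 4- and 8-byte integers.
    prefix = stream.read(1)[0]
    if prefix < 0xfd:
        return prefix
    fmt = {0xfd: '<H', 0xfe: '<I', 0xff: '<Q'}[prefix]
    return struct.unpack(fmt, stream.read(struct.calcsize(fmt)))[0]

def read_var_string(stream):
    # Variable-length indicator followed by that many raw bytes.
    length = read_var_int(stream)
    return stream.read(length)

data = io.BytesIO(b'\x05hello')
print(read_var_string(data))  # b'hello'
|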
def make_posthook(self):
""" Run the post hook into the project directory. """
if self.posthook:
os.chdir(self.project_name) # enter the project main directory
self.posthook()
|
Run the post hook into the project directory.
|
def en004(self, value=None):
""" Corresponds to IDD Field `en004`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `en004`
Unit: kJ/kg
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `en004`'.format(value))
self._en004 = value
|
Corresponds to IDD Field `en004`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `en004`
Unit: kJ/kg
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
def console(self):
"""starts to interact (starts interactive console) Something like code.InteractiveConsole"""
while True:
if six.PY2:
code = raw_input('>>> ')
else:
code = input('>>> ')
try:
print(self.eval(code))
except KeyboardInterrupt:
break
except Exception as e:
import traceback
if DEBUG:
sys.stderr.write(traceback.format_exc())
else:
sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
time.sleep(0.01)
|
Starts an interactive console, similar to code.InteractiveConsole.
|
def set_attributes(self, cell_renderer, **attributes):
"""
:param cell_renderer: the :obj:`Gtk.CellRenderer` we're setting the attributes of
:type cell_renderer: :obj:`Gtk.CellRenderer`
{{ docs }}
"""
Gtk.CellLayout.clear_attributes(self, cell_renderer)
for (name, value) in attributes.items():
Gtk.CellLayout.add_attribute(self, cell_renderer, name, value)
|
:param cell_renderer: the :obj:`Gtk.CellRenderer` we're setting the attributes of
:type cell_renderer: :obj:`Gtk.CellRenderer`
{{ docs }}
|
def creep_kill(self, target, timestamp):
"""
A creep was tragically killed. Need to split this into radiant/dire
and neutrals
"""
self.creep_kill_types[target] += 1
matched = False
for k, v in self.creep_types.iteritems():
if target.startswith(k):
matched = True
setattr(self, v, getattr(self, v) + 1)
break
if not matched:
print('> unhandled creep type: {}'.format(target))
|
A creep was tragically killed. Need to split this into radiant/dire
and neutrals
|
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None:
"""Called from hardware source when new data arrives."""
self.__state = state
self.__sub_area = sub_area
hardware_source_id = self.__hardware_source.hardware_source_id
channel_index = self.index
channel_id = self.channel_id
channel_name = self.name
metadata = copy.deepcopy(data_and_metadata.metadata)
hardware_source_metadata = dict()
hardware_source_metadata["hardware_source_id"] = hardware_source_id
hardware_source_metadata["channel_index"] = channel_index
if channel_id is not None:
hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id])
hardware_source_metadata["channel_id"] = channel_id
else:
hardware_source_metadata["reference_key"] = hardware_source_id
if channel_name is not None:
hardware_source_metadata["channel_name"] = channel_name
if view_id:
hardware_source_metadata["view_id"] = view_id
metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata)
data = data_and_metadata.data
master_data = self.__data_and_metadata.data if self.__data_and_metadata else None
data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype
if data_matches and sub_area is not None:
top = sub_area[0][0]
bottom = sub_area[0][0] + sub_area[1][0]
left = sub_area[0][1]
right = sub_area[0][1] + sub_area[1][1]
if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]:
master_data = numpy.copy(master_data)
master_data[top:bottom, left:right] = data[top:bottom, left:right]
else:
master_data = numpy.copy(data)
else:
master_data = data # numpy.copy(data). assume data does not need a copy.
data_descriptor = data_and_metadata.data_descriptor
intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None
dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None
timestamp = data_and_metadata.timestamp
new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor)
self.__data_and_metadata = new_extended_data
self.data_channel_updated_event.fire(new_extended_data)
self.is_dirty = True
|
Called from hardware source when new data arrives.
|
def set_chat_photo(self, chat_id, photo):
"""
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting is off in the target group.
https://core.telegram.org/bots/api#setchatphoto
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns True on success
:rtype: bool
"""
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
assert_type_or_raise(photo, InputFile, parameter_name="photo")
result = self.do("setChatPhoto", chat_id=chat_id, photo=photo)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
try:
return from_array_list(bool, result, list_level=0, is_builtin=True)
except TgApiParseException:
logger.debug("Failed parsing as primitive bool", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result
|
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting is off in the target group.
https://core.telegram.org/bots/api#setchatphoto
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:type chat_id: int | str|unicode
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns True on success
:rtype: bool
|
def resolve_variables(self, provided_variables):
"""Resolve the values of the blueprint variables.
This will resolve the values of the `VARIABLES` with values from the
env file, the config, and any lookups resolved.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
"""
self.resolved_variables = {}
defined_variables = self.defined_variables()
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, var_def in defined_variables.items():
value = resolve_variable(
var_name,
var_def,
variable_dict.get(var_name),
self.name
)
self.resolved_variables[var_name] = value
|
Resolve the values of the blueprint variables.
This will resolve the values of the `VARIABLES` with values from the
env file, the config, and any lookups resolved.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
|
def prt_results(self, goea_results):
"""Print GOEA results to the screen or to a file."""
# objaart = self.prepgrp.get_objaart(goea_results) if self.prepgrp is not None else None
if self.args.outfile is None:
self._prt_results(goea_results)
else:
# Users can print to both tab-separated file and xlsx file in one run.
outfiles = self.args.outfile.split(",")
grpwr = self.prepgrp.get_objgrpwr(goea_results) if self.prepgrp else None
if grpwr is None:
self.prt_outfiles_flat(goea_results, outfiles)
else:
grpwr.prt_outfiles_grouped(outfiles)
|
Print GOEA results to the screen or to a file.
|
def get_serializer(self, *args, **kwargs):
"""
Returns the serializer instance that should be used for the
given action.
If any action was given, returns the serializer_class
"""
action = kwargs.pop('action', None)
serializer_class = self.get_serializer_class(action)
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
|
Returns the serializer instance that should be used for the
given action.
If any action was given, returns the serializer_class
|
def iter_directory(directory):
"""Given a directory, yield all files recursivley as a two-tuple (filepath, s3key)"""
for path, dir, files in os.walk(directory):
for f in files:
filepath = os.path.join(path, f)
key = os.path.relpath(filepath, directory)
yield (filepath, key)
|
Given a directory, yield all files recursively as a two-tuple (filepath, s3key)
|
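A small, self-contained usage sketch of the `iter_directory` generator above; the helper is restated so the example runs standalone, and the temporary directory layout is made up for illustration.
import os
import tempfile

def iter_directory(directory):
    # Yield (absolute path, path relative to `directory`) for every file found.
    for path, _dirs, files in os.walk(directory):
        for f in files:
            filepath = os.path.join(path, f)
            yield filepath, os.path.relpath(filepath, directory)

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
open(os.path.join(root, "sub", "a.txt"), "w").close()

for filepath, key in iter_directory(root):
    print(filepath, "->", key)  # e.g. /tmp/tmpXXXX/sub/a.txt -> sub/a.txt
|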
def pop(self):
"""Pop a reading off of this stream and return it."""
if self._count == 0:
raise StreamEmptyError("Pop called on buffered stream walker without any data", selector=self.selector)
while True:
curr = self.engine.get(self.storage_type, self.offset)
self.offset += 1
stream = DataStream.FromEncoded(curr.stream)
if self.matches(stream):
self._count -= 1
return curr
|
Pop a reading off of this stream and return it.
|
def get_tokens_by_code(self, code, state):
"""Function to get access code for getting the user details from the
OP. It is called after the user authorizes by visiting the auth URL.
Parameters:
* **code (string):** code, parsed from the callback URL querystring
* **state (string):** state value parsed from the callback URL
Returns:
**dict:** The tokens object with the following data structure.
Example response::
{
"access_token": "<token string>",
"expires_in": 3600,
"refresh_token": "<token string>",
"id_token": "<token string>",
"id_token_claims":
{
"iss": "https://server.example.com",
"sub": "24400320",
"aud": "s6BhdRkqt3",
"nonce": "n-0S6_WzA2Mj",
"exp": 1311281970,
"iat": 1311280970,
"at_hash": "MTIzNDU2Nzg5MDEyMzQ1Ng"
}
}
Raises:
**OxdServerError:** If oxd server throws an error OR if the params code
and scopes are of improper data type.
"""
params = dict(oxd_id=self.oxd_id, code=code, state=state)
logger.debug("Sending command `get_tokens_by_code` with params %s",
params)
response = self.msgr.request("get_tokens_by_code", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']
|
Function to get access code for getting the user details from the
OP. It is called after the user authorizes by visiting the auth URL.
Parameters:
* **code (string):** code, parsed from the callback URL querystring
* **state (string):** state value parsed from the callback URL
Returns:
**dict:** The tokens object with the following data structure.
Example response::
{
"access_token": "<token string>",
"expires_in": 3600,
"refresh_token": "<token string>",
"id_token": "<token string>",
"id_token_claims":
{
"iss": "https://server.example.com",
"sub": "24400320",
"aud": "s6BhdRkqt3",
"nonce": "n-0S6_WzA2Mj",
"exp": 1311281970,
"iat": 1311280970,
"at_hash": "MTIzNDU2Nzg5MDEyMzQ1Ng"
}
}
Raises:
**OxdServerError:** If oxd server throws an error OR if the params code
and scopes are of improper data type.
|
def _merge_mappings(*args):
"""Merges a sequence of dictionaries and/or tuples into a single dictionary.
If a given argument is a tuple, it must have two elements, the first of which is a sequence of keys and the second
of which is a single value, which will be mapped to from each of the keys in the sequence.
"""
dct = {}
for arg in args:
if isinstance(arg, dict):
merge = arg
else:
assert isinstance(arg, tuple)
keys, value = arg
merge = dict(zip(keys, [value]*len(keys)))
dct.update(merge)
return dct
|
Merges a sequence of dictionaries and/or tuples into a single dictionary.
If a given argument is a tuple, it must have two elements, the first of which is a sequence of keys and the second
of which is a single value, which will be mapped to from each of the keys in the sequence.
|
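A usage sketch of `_merge_mappings` above, restated compactly so the example runs on its own; the key names are illustrative.
def _merge_mappings(*args):
    # Same contract as above: dicts are merged as-is; (keys, value) tuples fan out.
    dct = {}
    for arg in args:
        if isinstance(arg, dict):
            merge = arg
        else:
            keys, value = arg
            merge = dict.fromkeys(keys, value)
        dct.update(merge)
    return dct

merged = _merge_mappings(
    {"a": 1},
    (("b", "c"), 2),   # both "b" and "c" map to 2
    {"a": 10},         # later arguments win on conflicts
)
print(merged)  # {'a': 10, 'b': 2, 'c': 2}
|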
def AddPoly(self, poly, smart_duplicate_handling=True):
"""
Adds a new polyline to the collection.
"""
inserted_name = poly.GetName()
if poly.GetName() in self._name_to_shape:
if not smart_duplicate_handling:
raise ShapeError("Duplicate shape found: " + poly.GetName())
print ("Warning: duplicate shape id being added to collection: " +
poly.GetName())
if poly.GreedyPolyMatchDist(self._name_to_shape[poly.GetName()]) < 10:
print(" (Skipping as it apears to be an exact duplicate)")
else:
print(" (Adding new shape variant with uniquified name)")
inserted_name = "%s-%d" % (inserted_name, len(self._name_to_shape))
self._name_to_shape[inserted_name] = poly
|
Adds a new polyline to the collection.
|
def get_genes_for_hgnc_id(self, hgnc_symbol):
""" obtain the ensembl gene IDs that correspond to a HGNC symbol
"""
headers = {"content-type": "application/json"}
# http://grch37.rest.ensembl.org/xrefs/symbol/homo_sapiens/KMT2A?content-type=application/json
self.attempt = 0
ext = "/xrefs/symbol/homo_sapiens/{}".format(hgnc_symbol)
r = self.ensembl_request(ext, headers)
genes = []
for item in json.loads(r):
if item["type"] == "gene":
genes.append(item["id"])
return genes
|
obtain the ensembl gene IDs that correspond to a HGNC symbol
|
def main_nonexecutable_region_limbos_contain(self, addr, tolerance_before=64, tolerance_after=64):
"""
Sometimes there exists a pointer that points to a few bytes before the beginning of a section, or a few bytes
after the beginning of the section. We take care of that here.
:param int addr: The address to check.
:return: A 2-tuple of (bool, the closest base address)
:rtype: tuple
"""
closest_region = None
least_limbo = None
for start, end in self.main_nonexecutable_regions:
if start - tolerance_before <= addr < start:
if least_limbo is None or start - addr < least_limbo:
closest_region = (True, start)
least_limbo = start - addr
if end <= addr < end + tolerance_after:
if least_limbo is None or addr - end < least_limbo:
closest_region = (True, end)
least_limbo = addr - end
if closest_region is not None:
return closest_region
return False, None
|
Sometimes there exists a pointer that points to a few bytes before the beginning of a section, or a few bytes
after the beginning of the section. We take care of that here.
:param int addr: The address to check.
:return: A 2-tuple of (bool, the closest base address)
:rtype: tuple
|
def create_node(hostname, username, password, name, address):
'''
Create a new node if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to create
address
The address of the node
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'create', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
'address': address
}
)
#is this node currently configured?
existing = __salt__['bigip.list_node'](hostname, username, password, name)
# if it exists
if existing['code'] == 200:
ret['result'] = True
ret['comment'] = 'A node by this name currently exists. No change made.'
# if it doesn't exist
elif existing['code'] == 404:
response = __salt__['bigip.create_node'](hostname, username, password, name, address)
ret['result'] = True
ret['changes']['old'] = {}
ret['changes']['new'] = response['content']
ret['comment'] = 'Node was successfully created.'
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
|
Create a new node if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to create
address
The address of the node
|
def ReadSerializableArray(self, class_name, max=sys.maxsize):
"""
Deserialize a stream into the object specified by `class_name`.
Args:
class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'
max (int): (Optional) maximum number of bytes to read.
Returns:
list: list of `class_name` objects deserialized from the stream.
"""
module = '.'.join(class_name.split('.')[:-1])
klassname = class_name.split('.')[-1]
klass = getattr(importlib.import_module(module), klassname)
length = self.ReadVarInt(max=max)
items = []
# logger.info("READING ITEM %s %s " % (length, class_name))
try:
for i in range(0, length):
item = klass()
item.Deserialize(self)
# logger.info("deserialized item %s %s " % ( i, item))
items.append(item)
except Exception as e:
logger.error("Couldn't deserialize %s " % e)
return items
|
Deserialize a stream into the object specified by `class_name`.
Args:
class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'
max (int): (Optional) maximum number of bytes to read.
Returns:
list: list of `class_name` objects deserialized from the stream.
|
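The dotted-path import used by `ReadSerializableArray` above can be isolated into a small helper. This sketch uses only the standard library (`collections.OrderedDict` is just a convenient class to resolve), and `load_class` is an illustrative name.
import importlib

def load_class(dotted_path):
    # Split "package.module.ClassName" into module path and attribute name,
    # import the module, then fetch the class from it.
    module_path, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

OrderedDict = load_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # OrderedDict([('a', 1)])
|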
def get_vnetwork_portgroups_output_vnetwork_pgs_vlan(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
vlan = ET.SubElement(vnetwork_pgs, "vlan")
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def _convert_pflags(self, pflags):
"""convert SFTP-style open() flags to Python's os.open() flags"""
if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
flags = os.O_RDWR
elif pflags & SFTP_FLAG_WRITE:
flags = os.O_WRONLY
else:
flags = os.O_RDONLY
if pflags & SFTP_FLAG_APPEND:
flags |= os.O_APPEND
if pflags & SFTP_FLAG_CREATE:
flags |= os.O_CREAT
if pflags & SFTP_FLAG_TRUNC:
flags |= os.O_TRUNC
if pflags & SFTP_FLAG_EXCL:
flags |= os.O_EXCL
return flags
|
convert SFTP-style open() flags to Python's os.open() flags
|
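A runnable sketch of the SFTP-to-os flag mapping above. The SFTP_FLAG_* bit values are assumed to be the standard SSH_FXF_* constants (0x01, 0x02, 0x04, 0x08, 0x10, 0x20), and the function is restated locally so the example is self-contained.
import os

# Assumed SSH_FXF_* bit values matching the SFTP_FLAG_* constants used above.
SFTP_FLAG_READ, SFTP_FLAG_WRITE = 0x01, 0x02
SFTP_FLAG_APPEND, SFTP_FLAG_CREATE = 0x04, 0x08
SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL = 0x10, 0x20

def convert_pflags(pflags):
    # Same mapping as the method above, written as a free function.
    if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
        flags = os.O_RDWR
    elif pflags & SFTP_FLAG_WRITE:
        flags = os.O_WRONLY
    else:
        flags = os.O_RDONLY
    if pflags & SFTP_FLAG_APPEND:
        flags |= os.O_APPEND
    if pflags & SFTP_FLAG_CREATE:
        flags |= os.O_CREAT
    if pflags & SFTP_FLAG_TRUNC:
        flags |= os.O_TRUNC
    return flags

# An SFTP open for "write, create, truncate" maps to the same bits as open(..., 'wb').
pflags = SFTP_FLAG_WRITE | SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
print(convert_pflags(pflags) == (os.O_WRONLY | os.O_CREAT | os.O_TRUNC))  # True
|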
def get_requires(self, profile=None):
"""Get filtered list of Require objects in this Feature
:param str profile: Return Require objects with this profile or None
to return all Require objects.
:return: list of Require objects
"""
out = []
for req in self.requires:
# Filter Require by profile
if ((req.profile and not profile) or
(req.profile and profile and req.profile != profile)):
continue
out.append(req)
return out
|
Get filtered list of Require objects in this Feature
:param str profile: Return Require objects with this profile or None
to return all Require objects.
:return: list of Require objects
|
def get_relavent_units(self):
'''
Retrieves the relevant units for this data block.
Returns:
All flags related to this block.
'''
relavent_units = {}
for location,unit in self.units.items():
if self.unit_is_related(location, self.worksheet):
relavent_units[location] = unit
return relavent_units
|
Retrieves the relevant units for this data block.
Returns:
All flags related to this block.
|
def get_annotation_data_before_time(self, id_tier, time):
"""Give the annotation before a given time. When the tier contains
reference annotations this will be returned, check
:func:`get_ref_annotation_data_before_time` for the format. If an
annotation overlaps with ``time`` that annotation will be returned.
:param str id_tier: Name of the tier.
:param int time: Time to get the annotation before.
:raises KeyError: If the tier is non existent.
"""
if self.tiers[id_tier][1]:
return self.get_ref_annotation_before_time(id_tier, time)
befores = self.get_annotation_data_between_times(id_tier, 0, time)
if befores:
return [max(befores, key=lambda x: x[0])]
else:
return []
|
Give the annotation before a given time. When the tier contains
reference annotations this will be returned, check
:func:`get_ref_annotation_data_before_time` for the format. If an
annotation overlaps with ``time`` that annotation will be returned.
:param str id_tier: Name of the tier.
:param int time: Time to get the annotation before.
:raises KeyError: If the tier is non existent.
|
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if type(self) is not DateOffset:
raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(
name=self.__class__.__name__))
kwds = self.kwds
relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
'minutes', 'seconds', 'microseconds'}
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(kwds).issubset(relativedelta_fast)):
months = ((kwds.get('years', 0) * 12 +
kwds.get('months', 0)) * self.n)
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = type(i)(shifted, freq=i.freq, dtype=i.dtype)
weeks = (kwds.get('weeks', 0)) * self.n
if weeks:
# integer addition on PeriodIndex is deprecated,
# so we directly use _time_shift instead
asper = i.to_period('W')
if not isinstance(asper._data, np.ndarray):
# unwrap PeriodIndex --> PeriodArray
asper = asper._data
shifted = asper._time_shift(weeks)
i = shifted.to_timestamp() + i.to_perioddelta('W')
timedelta_kwds = {k: v for k, v in kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds']}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd))
|
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
|
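From the caller's side, the vectorized path above is exercised simply by adding a `DateOffset` to a `DatetimeIndex`; a small sketch (dates chosen arbitrarily):
import pandas as pd

idx = pd.date_range("2024-01-31", periods=3, freq="D")
# months/days are relativedelta-style keywords, so the whole index is shifted
# in one vectorized pass rather than element by element.
shifted = idx + pd.DateOffset(months=1, days=2)
print(list(shifted))  # 2024-03-02, 2024-03-03, 2024-03-04
|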
def _draw_box(self, parent_node, quartiles, outliers, box_index, metadata):
"""
Return the center of a bounding box defined by a box plot.
Draws a box plot on self.svg.
"""
width = (self.view.x(1) - self.view.x(0)) / self._order
series_margin = width * self._series_margin
left_edge = self.view.x(0) + width * box_index + series_margin
width -= 2 * series_margin
# draw lines for whiskers - bottom, median, and top
for i, whisker in enumerate((quartiles[0], quartiles[2],
quartiles[4])):
whisker_width = width if i == 1 else width / 2
shift = (width - whisker_width) / 2
xs = left_edge + shift
xe = left_edge + width - shift
alter(
self.svg.line(
parent_node,
coords=[(xs, self.view.y(whisker)),
(xe, self.view.y(whisker))],
class_='reactive tooltip-trigger',
attrib={'stroke-width': 3}
), metadata
)
# draw lines connecting whiskers to box (Q1 and Q3)
alter(
self.svg.line(
parent_node,
coords=[(left_edge + width / 2, self.view.y(quartiles[0])),
(left_edge + width / 2, self.view.y(quartiles[1]))],
class_='reactive tooltip-trigger',
attrib={'stroke-width': 2}
), metadata
)
alter(
self.svg.line(
parent_node,
coords=[(left_edge + width / 2, self.view.y(quartiles[4])),
(left_edge + width / 2, self.view.y(quartiles[3]))],
class_='reactive tooltip-trigger',
attrib={'stroke-width': 2}
), metadata
)
# box, bounded by Q1 and Q3
alter(
self.svg.node(
parent_node,
tag='rect',
x=left_edge,
y=self.view.y(quartiles[1]),
height=self.view.y(quartiles[3]) - self.view.y(quartiles[1]),
width=width,
class_='subtle-fill reactive tooltip-trigger'
), metadata
)
# draw outliers
for o in outliers:
alter(
self.svg.node(
parent_node,
tag='circle',
cx=left_edge + width / 2,
cy=self.view.y(o),
r=3,
class_='subtle-fill reactive tooltip-trigger'
), metadata
)
return (
left_edge + width / 2,
self.view.y(sum(quartiles) / len(quartiles))
)
|
Return the center of a bounding box defined by a box plot.
Draws a box plot on self.svg.
|
def pkgPath(root, path, rpath="/"):
"""
Package up a path recursively
"""
global data_files
if not os.path.exists(path):
return
files = []
for spath in os.listdir(path):
# Ignore test directories
if spath == 'test':
continue
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isfile(subpath):
files.append(subpath)
if os.path.isdir(subpath):
pkgPath(root, subpath, spath)
data_files.append((root + rpath, files))
|
Package up a path recursively
|
def fit_labels_to_mask(label_image, mask):
r"""
Reduces a label image by overlaying it with a binary mask and assigning the labels
either to the mask or to the background. The resulting binary mask is the nearest
expression the label image can form of the supplied binary mask.
Parameters
----------
label_image : array_like
A nD label map.
mask : array_like
A mask image, i.e., a binary image with False for background and True for foreground.
Returns
-------
best_fit : ndarray
The best fit of the labels to the mask.
Raises
------
ValueError
If ``label_image`` and ``mask`` are not of the same shape.
"""
label_image = scipy.asarray(label_image)
mask = scipy.asarray(mask, dtype=scipy.bool_)
if label_image.shape != mask.shape:
raise ValueError('The input images must be of the same shape.')
# prepare collection dictionaries
labels = scipy.unique(label_image)
collection = {}
for label in labels:
collection[label] = [0, 0, []] # size, union, points
# iterate over the label images pixels and collect position, size and union
for x in range(label_image.shape[0]):
for y in range(label_image.shape[1]):
for z in range(label_image.shape[2]):
entry = collection[label_image[x,y,z]]
entry[0] += 1
if mask[x,y,z]: entry[1] += 1
entry[2].append((x,y,z))
# select labels that are more than half in the mask
for label in labels:
if collection[label][0] / 2. >= collection[label][1]:
del collection[label]
# image_result = numpy.zeros_like(mask) this is eq. to mask.copy().fill(0), which directly applied does not allow access to the rows and colums: Why?
image_result = mask.copy()
image_result.fill(False)
# add labels to result mask
for label, data in list(collection.items()):
for point in data[2]:
image_result[point] = True
return image_result
|
r"""
Reduces a label image by overlaying it with a binary mask and assigning the labels
either to the mask or to the background. The resulting binary mask is the nearest
expression the label image can form of the supplied binary mask.
Parameters
----------
label_image : array_like
A nD label map.
mask : array_like
A mask image, i.e., a binary image with False for background and True for foreground.
Returns
-------
best_fit : ndarray
The best fit of the labels to the mask.
Raises
------
ValueError
If ``label_image`` and ``mask`` are not of the same shape.
|
def root_item_selected(self, item):
"""Root item has been selected: expanding it and collapsing others"""
if self.show_all_files:
return
for root_item in self.get_top_level_items():
if root_item is item:
self.expandItem(root_item)
else:
self.collapseItem(root_item)
|
Root item has been selected: expanding it and collapsing others
|
def _filter_modules(self, plugins, names):
"""
Internal helper method to parse all of the plugins and names
through each of the module filters
"""
if self.module_plugin_filters:
# check to make sure the number of plugins isn't changing
original_length_plugins = len(plugins)
module_plugins = set()
for module_filter in self.module_plugin_filters:
module_plugins.update(module_filter(plugins, names))
if len(plugins) < original_length_plugins:
warning = """Module Filter removing plugins from original
data member! Suggest creating a new list in each module
filter and returning new list instead of modifying the
original data member so subsequent module filters can have
access to all the possible plugins.\n {}"""
self._log.info(warning.format(module_filter))
plugins = module_plugins
return plugins
|
Internal helper method to parse all of the plugins and names
through each of the module filters
|
def _wait_and_except_if_failed(self, event, timeout=None):
"""Combines waiting for event and call to `_except_if_failed`. If timeout is not specified the configured
sync_timeout is used.
"""
event.wait(timeout or self.__sync_timeout)
self._except_if_failed(event)
|
Combines waiting for event and call to `_except_if_failed`. If timeout is not specified the configured
sync_timeout is used.
|
def get_version():
"""
Get the Windows OS version running on the machine.
Params:
None
Returns:
The Windows OS version running on the machine (comparable with the version values listed in the class).
"""
# Other OS check
if not 'win' in sys.platform:
return NO_WIN
# Get infos
win_ver = sys.getwindowsversion()
try:
# Python 3.6.x or upper -> Use 'platform_version' attribute
major, minor, build = win_ver.platform_version
except AttributeError:
if sys.version_info < (3, 0):
# Python 2.7.x -> Use 'platform' module to ensure the correct values (seems that Win 10 is not correctly detected)
from platform import _get_real_winver
major, minor, build = _get_real_winver(win_ver.major, win_ver.minor, win_ver.build)
major, minor, build = int(major), int(minor), int(build) # 'long' to 'int'
else:
# Python 3.0.x - 3.5.x -> Keep 'sys.getwindowsversion()'' values
major, minor, build = win_ver.major, win_ver.minor, win_ver.build
# Check is is server or not (it works only on Python 2.7.x or newer)
try:
is_server = 1 if win_ver.product_type == 3 else 0
except AttributeError:
is_server = 0
# Parse Service Pack version (or Build number)
try:
if major == 10:
# The OS is Windows 10 or Windows Server 2016,
# so the service pack version is instead the Build number
sp_ver = build
else:
sp_ver = win_ver.service_pack_major or 0
except AttributeError:
try:
sp_ver = int(win_ver.service_pack.rsplit(' ', 1)[1])
except (IndexError, ValueError):
sp_ver = 0
# Return the final version data
return (major, minor, sp_ver, is_server)
|
Get the Windows OS version running on the machine.
Params:
None
Returns:
The Windows OS version running on the machine (comparable with the version values listed in the class).
|
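A hypothetical way to consume the tuple returned by `get_version` above (assumes the call actually runs on Windows, so the `NO_WIN` case is not handled here):
# Illustrative only: unpack the (major, minor, sp_or_build, is_server) tuple
# and compare it like any other version tuple.
major, minor, sp_or_build, is_server = get_version()
if (major, minor) >= (10, 0):
    print("Windows 10 / Server 2016 or newer, build/SP", sp_or_build)
elif (major, minor) >= (6, 2):
    print("Windows 8 / Server 2012 or newer")
else:
    print("older Windows release")
|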
def redraw(self, whence=0):
"""Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`.
"""
with self._defer_lock:
whence = min(self._defer_whence, whence)
if not self.defer_redraw:
if self._hold_redraw_cnt == 0:
self._defer_whence = self._defer_whence_reset
self.redraw_now(whence=whence)
else:
self._defer_whence = whence
return
elapsed = time.time() - self.time_last_redraw
# If there is no redraw scheduled, or we are overdue for one:
if (not self._defer_flag) or (elapsed > self.defer_lagtime):
# If more time than defer_lagtime has passed since the
# last redraw then just do the redraw immediately
if elapsed > self.defer_lagtime:
if self._hold_redraw_cnt > 0:
#self._defer_flag = True
self._defer_whence = whence
return
self._defer_whence = self._defer_whence_reset
self.logger.debug("lagtime expired--forced redraw")
self.redraw_now(whence=whence)
return
# Indicate that a redraw is necessary and record whence
self._defer_flag = True
self._defer_whence = whence
# schedule a redraw by the end of the defer_lagtime
secs = self.defer_lagtime - elapsed
self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (
whence, secs))
self.reschedule_redraw(secs)
else:
# A redraw is already scheduled. Just record whence.
self._defer_whence = whence
self.logger.debug("update whence=%.2f" % (whence))
|
Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`.
|
def set_mode_px4(self, mode, custom_mode, custom_sub_mode):
'''enter arbitrary mode'''
if isinstance(mode, str):
mode_map = self.mode_mapping()
if mode_map is None or mode not in mode_map:
print("Unknown mode '%s'" % mode)
return
# PX4 uses two fields to define modes
mode, custom_mode, custom_sub_mode = px4_map[mode]
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0, mode, custom_mode, custom_sub_mode, 0, 0, 0, 0)
|
enter arbitrary mode
|
def updateStatus(self, dataset, is_dataset_valid):
"""
Used to toggle the status of a dataset is_dataset_valid=0/1 (invalid/valid)
"""
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/updateStatus. dataset is required.")
conn = self.dbi.connection()
trans = conn.begin()
try:
self.updatestatus.execute(conn, dataset, is_dataset_valid, trans)
trans.commit()
trans = None
except Exception as ex:
if trans:
trans.rollback()
raise ex
finally:
if trans:
trans.rollback()
if conn:
conn.close()
|
Used to toggle the status of a dataset is_dataset_valid=0/1 (invalid/valid)
|
def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix associated to X."""
vyt = self.variance_Yt
vyx = self.variance_Yx
lyt = 1./(2*self.lengthscale_Yt)
lyx = 1./(2*self.lengthscale_Yx)
a = self.a
b = self.b
c = self.c
## dk^2/dtdt'
k1 = (2*lyt )*vyt*vyx
## dk^2/dx^2
k2 = ( - 2*lyx )*vyt*vyx
## dk^4/dx^2dx'^2
k3 = ( 4*3*lyx**2 )*vyt*vyx
Kdiag = np.zeros(X.shape[0])
slices = index_to_slices(X[:,-1])
for i, ss1 in enumerate(slices):
for s1 in ss1:
if i==0:
Kdiag[s1]+= vyt*vyx
elif i==1:
#i=1
Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
#Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
raise ValueError("invalid input/output index")
return Kdiag
|
Compute the diagonal of the covariance matrix associated to X.
|
def collect(context=None, style=None, palette=None, **kwargs):
"""Returns the merged rcParams dict of the specified context, style, and palette.
Parameters
----------
context: str
style: str
palette: str
kwargs:
-
Returns
-------
rcParams: dict
The merged parameter dicts of the specified context, style, and palette.
Notes
-----
The rcParams dicts are loaded and updated in the order: context, style, palette. That means if
a context parameter is also defined in the style or palette dict, it will be overwritten. There
is currently no checking being done to avoid this.
"""
params = {}
if context:
params.update(get(context, 'context', **kwargs))
if style:
params.update(get(style, 'style', **kwargs))
if palette:
params.update(get(palette, 'palette', **kwargs))
return params
|
Returns the merged rcParams dict of the specified context, style, and palette.
Parameters
----------
context: str
style: str
palette: str
kwargs:
-
Returns
-------
rcParams: dict
The merged parameter dicts of the specified context, style, and palette.
Notes
-----
The rcParams dicts are loaded and updated in the order: context, style, palette. That means if
a context parameter is also defined in the style or palette dict, it will be overwritten. There
is currently no checking being done to avoid this.
|
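A self-contained sketch of the merge order described above; the preset dictionaries and the `collect_params` name are made up for illustration, standing in for the `get(name, kind)` lookups used by `collect`.
def collect_params(context=None, style=None, palette=None):
    # Minimal stand-in for the helper above: later groups override earlier ones.
    presets = {
        'context': {'notebook': {'font.size': 12, 'lines.linewidth': 1.5}},
        'style':   {'dark':     {'axes.facecolor': '#222', 'font.size': 11}},
        'palette': {'muted':    {'axes.prop_cycle': ['#4878d0', '#ee854a']}},
    }
    params = {}
    for kind, name in (('context', context), ('style', style), ('palette', palette)):
        if name:
            params.update(presets[kind][name])
    return params

print(collect_params(context='notebook', style='dark'))
# {'font.size': 11, 'lines.linewidth': 1.5, 'axes.facecolor': '#222'}
|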
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
|
Receives a file path as input and returns a
string with the contents of the file
|
def happens(intervals: Iterable[float], name: Optional[str] = None) -> Callable:
"""
Decorator used to set up a process that adds a new instance of another process at intervals dictated by the given
sequence (which may be infinite).
Example: the following program runs process named `my_process` 5 times, each time spaced by 2.0 time units.
```
from itertools import repeat
sim = Simulator()
log = []
@happens(repeat(2.0, 5))
def my_process(the_log):
the_log.append(now())
sim.add(my_process, log)
sim.run()
print(str(log)) # Expect: [2.0, 4.0, 6.0, 8.0, 10.0]
```
"""
def hook(event: Callable):
def make_happen(*args_event: Any, **kwargs_event: Any) -> None:
if name is not None:
local.name = cast(str, name)
for interval in intervals:
advance(interval)
add(event, *args_event, **kwargs_event)
return make_happen
return hook
|
Decorator used to set up a process that adds a new instance of another process at intervals dictated by the given
sequence (which may be infinite).
Example: the following program runs process named `my_process` 5 times, each time spaced by 2.0 time units.
```
from itertools import repeat
sim = Simulator()
log = []
@happens(repeat(2.0, 5))
def my_process(the_log):
the_log.append(now())
sim.add(my_process, log)
sim.run()
print(str(log)) # Expect: [2.0, 4.0, 6.0, 8.0, 10.0]
```
|
def last_archive(self):
'''
Get the last available archive
:return:
'''
archives = {}
for archive in self.archives():
archives[int(archive.split('.')[0].split('-')[-1])] = archive
return archives and archives[max(archives)] or None
|
Get the last available archive
:return:
|
def _make_image_to_vec_tito(feature_name, tmp_dir=None, checkpoint=None):
"""Creates a tensor-in-tensor-out function that produces embeddings from image bytes.
Image to embedding is implemented with Tensorflow's inception v3 model and a pretrained
checkpoint. It returns 1x2048 'PreLogits' embeddings for each image.
Args:
feature_name: The name of the feature. Used only to identify the image tensors so
we can get gradients for probing when explaining image predictions.
tmp_dir: a local directory that is used for downloading the checkpoint. If
None, a temp folder will be made and deleted.
checkpoint: the inception v3 checkpoint gs or local path. If None, default checkpoint
is used.
Returns: a tensor-in-tensor-out function that takes image string tensor and returns embeddings.
"""
def _image_to_vec(image_str_tensor):
def _decode_and_resize(image_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image_tensor = tf.where(tf.equal(image_tensor, ''), IMAGE_DEFAULT_STRING, image_tensor)
# Fork by whether image_tensor value is a file path, or a base64 encoded string.
slash_positions = tf.equal(tf.string_split([image_tensor], delimiter="").values, '/')
is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)
# The following two functions are required for tf.cond. Note that we can not replace them
# with lambda. According to TF docs, if using inline lambda, both branches of condition
# will be executed. The workaround is to use a function call.
def _read_file():
return tf.read_file(image_tensor)
def _decode_base64():
return tf.decode_base64(image_tensor)
image = tf.cond(is_file_path, lambda: _read_file(), lambda: _decode_base64())
image = tf.image.decode_jpeg(image, channels=channels)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image
# The CloudML Prediction API always "feeds" the Tensorflow graph with
# dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar
# strings because it cannot guarantee a batch of images would have
# the same output size. We use tf.map_fn to give decode_jpeg a scalar
# string from dynamic batches.
image = tf.map_fn(_decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# "gradients_[feature_name]" will be used for computing integrated gradients.
image = tf.identity(image, name='gradients_' + feature_name)
image = tf.subtract(image, 0.5)
inception_input = tf.multiply(image, 2.0)
# Build Inception layers, which expect a tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with tf.contrib.slim.arg_scope(inception_v3_arg_scope()):
_, end_points = inception_v3(inception_input, is_training=False)
embeddings = end_points['PreLogits']
inception_embeddings = tf.squeeze(embeddings, [1, 2], name='SpatialSqueeze')
return inception_embeddings
def _tito_from_checkpoint(tito_in, checkpoint, exclude):
""" Create an all-constants tito function from an original tito function.
Given a tensor-in-tensor-out function which contains variables and a checkpoint path,
create a new tensor-in-tensor-out function which includes only constants, and can be
used in tft.map.
"""
def _tito_out(tensor_in):
checkpoint_dir = tmp_dir
if tmp_dir is None:
checkpoint_dir = tempfile.mkdtemp()
g = tf.Graph()
with g.as_default():
si = tf.placeholder(dtype=tensor_in.dtype, shape=tensor_in.shape, name=tensor_in.op.name)
so = tito_in(si)
all_vars = tf.contrib.slim.get_variables_to_restore(exclude=exclude)
saver = tf.train.Saver(all_vars)
# Downloading the checkpoint from GCS to local speeds up saver.restore() a lot.
checkpoint_tmp = os.path.join(checkpoint_dir, 'checkpoint')
with file_io.FileIO(checkpoint, 'r') as f_in, file_io.FileIO(checkpoint_tmp, 'w') as f_out:
f_out.write(f_in.read())
with tf.Session() as sess:
saver.restore(sess, checkpoint_tmp)
output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
g.as_graph_def(),
[so.op.name])
file_io.delete_file(checkpoint_tmp)
if tmp_dir is None:
shutil.rmtree(checkpoint_dir)
tensors_out = tf.import_graph_def(output_graph_def,
input_map={si.name: tensor_in},
return_elements=[so.name])
return tensors_out[0]
return _tito_out
if not checkpoint:
checkpoint = INCEPTION_V3_CHECKPOINT
return _tito_from_checkpoint(_image_to_vec, checkpoint, INCEPTION_EXCLUDED_VARIABLES)
|
Creates a tensor-in-tensor-out function that produces embeddings from image bytes.
Image to embedding is implemented with Tensorflow's inception v3 model and a pretrained
checkpoint. It returns 1x2048 'PreLogits' embeddings for each image.
Args:
feature_name: The name of the feature. Used only to identify the image tensors so
we can get gradients for probing when explaining image predictions.
tmp_dir: a local directory that is used for downloading the checkpoint. If
None, a temp folder will be made and deleted.
checkpoint: the inception v3 checkpoint gs or local path. If None, default checkpoint
is used.
Returns: a tensor-in-tensor-out function that takes image string tensor and returns embeddings.
|
def get_hints(self, plugin):
''' Return plugin hints from ``plugin``. '''
hints = []
for hint_name in getattr(plugin, 'hints', []):
hint_plugin = self._plugins.get(hint_name)
if hint_plugin:
hint_result = Result(
name=hint_plugin.name,
homepage=hint_plugin.homepage,
from_url=self.requested_url,
type=HINT_TYPE,
plugin=plugin.name,
)
hints.append(hint_result)
logger.debug(f'{plugin.name} & hint {hint_result.name} detected')
else:
logger.error(f'{plugin.name} hints an invalid plugin: {hint_name}')
return hints
|
Return plugin hints from ``plugin``.
|
def _get_instance(self):
"""Retrieve instance matching instance_id."""
try:
instance = self.compute_driver.ex_get_node(
self.running_instance_id,
zone=self.region
)
except ResourceNotFoundError as e:
raise GCECloudException(
'Instance with id: {id} cannot be found: {error}'.format(
id=self.running_instance_id, error=e
)
)
return instance
|
Retrieve instance matching instance_id.
|
def intersection(self, other, ignore_conflicts=False):
"""Return a new definition from the intersection of the definitions."""
result = self.copy()
result.intersection_update(other, ignore_conflicts)
return result
|
Return a new definition from the intersection of the definitions.
|
def _assign_method(self, resource_class, method_type):
"""
Exactly the same code as the original except:
- uid is now first parameter (after self). Therefore, no need to explicitly call 'uid='
- Ignored the other http methods besides GET (as they are not needed for the pokeapi.co API)
- Added cache wrapping function
- Added a way to list all get methods
"""
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
)
# uid is now the first argument (after self)
@self._cache
def get(self, uid=None, method_type=method_type,
method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, **kwargs):
uid = uid.lower() if isinstance(uid, str) else uid
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
# only GET method is used
setattr(
self, method_name,
types.MethodType(get, self)
)
# for easier listing of get methods
self._all_get_methods_names.append(method_name)
|
Exactly the same code as the original except:
- uid is now first parameter (after self). Therefore, no need to explicitly call 'uid='
- Ignored the other http methods besides GET (as they are not needed for the pokeapi.co API)
- Added cache wrapping function
- Added a way to list all get methods
|
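A minimal sketch of the dynamic method binding that `_assign_method` relies on (`types.MethodType` plus `setattr`); the `Client` class, the resource names, and the stubbed `get` body are illustrative only.
import types

class Client:
    def __init__(self, resource_names):
        self._all_get_methods_names = []
        for name in resource_names:
            self._assign_get(name)

    def _assign_get(self, resource_name):
        method_name = 'get_' + resource_name

        def get(self, uid=None, resource=resource_name):
            # Stand-in for call_api(...): just echo what would be requested.
            return {'resource': resource, 'uid': uid}

        # Bind the freshly created function to *this* instance, as above.
        setattr(self, method_name, types.MethodType(get, self))
        self._all_get_methods_names.append(method_name)

client = Client(['pokemon', 'berry'])
print(client.get_pokemon('pikachu'))   # {'resource': 'pokemon', 'uid': 'pikachu'}
print(client._all_get_methods_names)   # ['get_pokemon', 'get_berry']
|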
def clean_metric_name(self, metric_name):
"""
Make sure the metric is free of control chars, spaces, tabs, etc.
"""
if not self._clean_metric_name:
return metric_name
metric_name = str(metric_name)
for _from, _to in self.cleaning_replacement_list:
metric_name = metric_name.replace(_from, _to)
return metric_name
|
Make sure the metric is free of control chars, spaces, tabs, etc.
|
def pm(client, event, channel, nick, rest):
'Arggh matey'
if rest:
rest = rest.strip()
Karma.store.change(rest, 2)
rcpt = rest
else:
rcpt = channel
if random.random() > 0.95:
return f"Arrggh ye be doin' great, grand work, {rcpt}!"
return f"Arrggh ye be doin' good work, {rcpt}!"
|
Arggh matey
|
def from_description(cls, description, attrs):
""" Create an object from a dynamo3 response """
hash_key = None
range_key = None
index_type = description["Projection"]["ProjectionType"]
includes = description["Projection"].get("NonKeyAttributes")
for data in description["KeySchema"]:
name = data["AttributeName"]
if name not in attrs:
continue
key_type = data["KeyType"]
if key_type == "HASH":
hash_key = TableField(name, attrs[name].data_type, key_type)
elif key_type == "RANGE":
range_key = TableField(name, attrs[name].data_type, key_type)
throughput = description["ProvisionedThroughput"]
return cls(
description["IndexName"],
index_type,
description["IndexStatus"],
hash_key,
range_key,
throughput["ReadCapacityUnits"],
throughput["WriteCapacityUnits"],
description.get("IndexSizeBytes", 0),
includes,
description,
)
|
Create an object from a dynamo3 response
|
def facts(self):
"""Iterate over the asserted Facts."""
fact = lib.EnvGetNextFact(self._env, ffi.NULL)
while fact != ffi.NULL:
yield new_fact(self._env, fact)
fact = lib.EnvGetNextFact(self._env, fact)
|
Iterate over the asserted Facts.
|
def create_base_logger(config=None, parallel=None):
"""Setup base logging configuration, also handling remote logging.
Correctly sets up for local, multiprocessing and distributed runs.
Creates subscribers for non-local runs that will be references from
local logging.
Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589
"""
if parallel is None: parallel = {}
parallel_type = parallel.get("type", "local")
cores = parallel.get("cores", 1)
if parallel_type == "ipython":
from bcbio.log import logbook_zmqpush
fqdn_ip = socket.gethostbyname(socket.getfqdn())
ips = [fqdn_ip] if (fqdn_ip and not fqdn_ip.startswith("127.")) else []
if not ips:
ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
if not ip.startswith("127.")]
if not ips:
ips += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())[1] for s in
[socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]]
if not ips:
sys.stderr.write("Cannot resolve a local IP address that isn't 127.x.x.x "
"Your machines might not have a local IP address "
"assigned or are not able to resolve it.\n")
sys.exit(1)
uri = "tcp://%s" % ips[0]
subscriber = logbook_zmqpush.ZeroMQPullSubscriber()
mport = subscriber.socket.bind_to_random_port(uri)
wport_uri = "%s:%s" % (uri, mport)
parallel["log_queue"] = wport_uri
subscriber.dispatch_in_background(_create_log_handler(config, True))
elif cores > 1:
subscriber = IOSafeMultiProcessingSubscriber(mpq)
subscriber.dispatch_in_background(_create_log_handler(config))
else:
# Do not need to setup anything for local logging
pass
return parallel
|
Setup base logging configuration, also handling remote logging.
Correctly sets up for local, multiprocessing and distributed runs.
Creates subscribers for non-local runs that will be references from
local logging.
Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589
|
def pre_serialize(self, raw, pkt, i):
'''
Set length of the header based on the length of the serialized payload.
'''
self.length = len(raw) + OpenflowHeader._MINLEN
|
Set length of the header based on the length of the serialized payload.
|