code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def delete_permanent(self, list_id, subscriber_hash):
"""
Delete permanently a member from a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param subscriber_hash: The MD5 hash of the lowercase version of the
list member’s email address.
:type subscriber_hash: :py:class:`str`
"""
subscriber_hash = check_subscriber_hash(subscriber_hash)
self.list_id = list_id
self.subscriber_hash = subscriber_hash
return self._mc_client._post(url=self._build_path(list_id, 'members', subscriber_hash, 'actions', 'delete-permanent')) | Delete permanently a member from a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param subscriber_hash: The MD5 hash of the lowercase version of the
list member’s email address.
:type subscriber_hash: :py:class:`str` |
def rdf_suffix(fmt: str) -> str:
""" Map the RDF format to the approproate suffix """
for k, v in SUFFIX_FORMAT_MAP.items():
if fmt == v:
return k
return 'rdf' | Map the RDF format to the approproate suffix |
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param body:
:param callback: A jsonp callback such as ``package.callback`` to be invoked
upon receiving the response. Not that it should not include a () suffix.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
Note that JSONP request may use GET requests as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case for public clients and provider specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs) | Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param body:
:param callback: A jsonp callback such as ``package.callback`` to be invoked
upon receiving the response. Not that it should not include a () suffix.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
Note that JSONP request may use GET requests as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case for public clients and provider specific authentication
credentials for confidential clients. |
def create_pipeline_stage(self, pipeline_key, name, **kwargs):
'''Creates a pipeline stage with the provided attributes.
Args:
name required name string
kwargs {..} see StreakStage object for details
return (status code, stage dict)
'''
#req sanity check
if not (pipeline_key and name):
return requests.codes.bad_request, None
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.stages_suffix])
kwargs.update({'name':name})
new_box = StreakStage(**kwargs)
code, data = self._req('put', uri, new_box.to_dict(rw = True))
return code, data | Creates a pipeline stage with the provided attributes.
Args:
name required name string
kwargs {..} see StreakStage object for details
return (status code, stage dict) |
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self._can_uninstall():
return
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
) | Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True). |
def key_changed(self):
"""Checks if the key name and value fields have been set, and updates the add key button"""
if self.key_name.get() and self.key_val.get():
self.button_key_add.state(["!disabled"])
else:
self.button_key_add.state(["disabled"]) | Checks if the key name and value fields have been set, and updates the add key button |
def is_valid_resource_name(rname, exception_type=None):
"""Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool
"""
match = _ARMNAME_RE.match(rname)
if match:
return True
if exception_type:
raise exception_type()
return False | Validates the given resource name to ARM guidelines, individual services may be more restrictive.
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool |
def user_list(self, params=None):
"""Lists all users within the tenant."""
uri = 'openstack/users'
if params:
uri += '?%s' % urllib.urlencode(params)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body) | Lists all users within the tenant. |
def ipaddr(value, options=None):
'''
Filters and returns only valid IP objects.
'''
ipv4_obj = ipv4(value, options=options)
ipv6_obj = ipv6(value, options=options)
if ipv4_obj is None or ipv6_obj is None:
# an IP address can be either IPv4 either IPv6
# therefofe if the value passed as arg is not a list, at least one of the calls above will return None
# if one of them is none, means that we should return only one of them
return ipv4_obj or ipv6_obj # one of them
else:
return ipv4_obj + ipv6_obj | Filters and returns only valid IP objects. |
def _check_for_dictionary_key(self, logical_id, dictionary, keys):
"""
Checks a dictionary to make sure it has a specific key. If it does not, an
InvalidResourceException is thrown.
:param string logical_id: logical id of this resource
:param dict dictionary: the dictionary to check
:param list keys: list of keys that should exist in the dictionary
"""
for key in keys:
if key not in dictionary:
raise InvalidResourceException(logical_id, 'Resource is missing the required [{}] '
'property.'.format(key)) | Checks a dictionary to make sure it has a specific key. If it does not, an
InvalidResourceException is thrown.
:param string logical_id: logical id of this resource
:param dict dictionary: the dictionary to check
:param list keys: list of keys that should exist in the dictionary |
def modify(self, management_address=None, username=None, password=None,
connection_type=None):
"""
Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(
managementAddress=management_address, username=username,
password=password, connectionType=connection_type)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method. |
def load(store):
"""Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested.
"""
# handle polymorphic store arg
store = normalize_store_arg(store)
if contains_array(store, path=None):
return Array(store=store, path=None)[...]
elif contains_group(store, path=None):
grp = Group(store=store, path=None)
return LazyLoader(grp) | Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested. |
def process_requests(self, requests, **context):
"""
Turns a list of request objects into a list of
response objects.
:param requests: A list of tuples describing the RPC call
:type requests: list[list[callable,object,object,list]]
:param context:
A dict with additional parameters passed to handle_request_string and process_requests
Allows wrapping code to pass additional parameters deep into parsing stack, override this
method and fold the parameters as needed into tha method call.
Imagine capturing authentication / permissions data from headers, converting them into
actionable / flag objects and putting them into **context.
Then override this method and fold the arguments into the call
(which may be a decorated function, where decorator unfolds the params and calls the actual method)
By default, context is not passed to method call below.
"""
ds = self._data_serializer
responses = []
for method, params, request_id, error in requests:
if error: # these are request message validation errors
if error.request_id: # no ID = Notification. We don't reply
responses.append(ds.assemble_error_response(error))
continue
if method not in self:
if request_id:
responses.append(ds.assemble_error_response(
errors.RPCMethodNotFound(
'Method "%s" is not found.' % method,
request_id
)
))
continue
try:
args = []
kwargs = {}
if isinstance(params, dict):
kwargs = params
elif params: # and/or must be type(params, list):
args = params
result = self.process_method(
self[method],
args,
kwargs,
request_id=request_id,
**context
)
if request_id:
responses.append(ds.assemble_response(result, request_id))
except errors.RPCFault as ex:
if request_id:
responses.append(ds.assemble_error_response(ex))
except Exception as ex:
if request_id:
responses.append(ds.assemble_error_response(
errors.RPCInternalError(
'While processing the follwoing message ("%s","%s","%s") ' % (method, params, request_id) +\
'encountered the following error message "%s"' % ex.message,
request_id=request_id,
message=ex.message
)
))
return responses | Turns a list of request objects into a list of
response objects.
:param requests: A list of tuples describing the RPC call
:type requests: list[list[callable,object,object,list]]
:param context:
A dict with additional parameters passed to handle_request_string and process_requests
Allows wrapping code to pass additional parameters deep into parsing stack, override this
method and fold the parameters as needed into tha method call.
Imagine capturing authentication / permissions data from headers, converting them into
actionable / flag objects and putting them into **context.
Then override this method and fold the arguments into the call
(which may be a decorated function, where decorator unfolds the params and calls the actual method)
By default, context is not passed to method call below. |
def send_zone_event(self, zone_id, event_name, *args):
""" Send an event to a zone. """
cmd = "EVENT %s!%s %s" % (
zone_id.device_str(), event_name,
" ".join(str(x) for x in args))
return (yield from self._send_cmd(cmd)) | Send an event to a zone. |
def unzip_file_to_dir(path_to_zip, output_directory):
"""
Extract a ZIP archive to a directory
"""
z = ZipFile(path_to_zip, 'r')
z.extractall(output_directory)
z.close() | Extract a ZIP archive to a directory |
def naturalize_string(key):
"""Analyzes string in a human way to enable natural sort
:param nodename: The node name to analyze
:returns: A structure that can be consumed by 'sorted'
"""
return [int(text) if text.isdigit() else text.lower()
for text in re.split(numregex, key)] | Analyzes string in a human way to enable natural sort
:param nodename: The node name to analyze
:returns: A structure that can be consumed by 'sorted' |
def bookDF(symbol, token='', version=''):
'''Book data
https://iextrading.com/developer/docs/#book
realtime during Investors Exchange market hours
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = book(symbol, token, version)
df = _bookToDF(x)
return df | Book data
https://iextrading.com/developer/docs/#book
realtime during Investors Exchange market hours
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result |
def apply(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs))
for arr in grouped)
return self._combine(applied, shortcut=shortcut) | Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array. |
def Main(url):
'''
Entry Point.
Args:
url: target url.
'''
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of NLP.
nlp_base = NlpBase()
# Set tokenizer. This is japanese tokenizer with MeCab.
nlp_base.tokenizable_doc = MeCabTokenizer()
sentence_list = nlp_base.listup_sentence(document)
batch_size = 10
if len(sentence_list) < batch_size:
raise ValueError("The number of extracted sentences is insufficient.")
all_token_list = []
for i in range(len(sentence_list)):
nlp_base.tokenize(sentence_list[i])
all_token_list.extend(nlp_base.token)
sentence_list[i] = nlp_base.token
vectorlizable_sentence = LSTMRTRBM()
vectorlizable_sentence.learn(
sentence_list=sentence_list,
token_master_list=list(set(all_token_list)),
hidden_neuron_count=1000,
batch_size=batch_size,
learning_rate=1e-03,
seq_len=5
)
test_list = sentence_list[:batch_size]
feature_points_arr = vectorlizable_sentence.vectorize(test_list)
print("Feature points (Top 5 sentences):")
print(feature_points_arr) | Entry Point.
Args:
url: target url. |
def _load_wurlitzer(self):
"""Load wurlitzer extension."""
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
from IPython.core.getipython import get_ipython
# Enclose this in a try/except because if it fails the
# console will be totally unusable.
# Fixes spyder-ide/spyder#8668
try:
get_ipython().run_line_magic('reload_ext', 'wurlitzer')
except Exception:
pass | Load wurlitzer extension. |
def _op_generic_HAdd(self, args):
"""
Halving add, for some ARM NEON instructions.
"""
components = []
for a, b in self.vector_args(args):
if self.is_signed:
a = a.sign_extend(self._vector_size)
b = b.sign_extend(self._vector_size)
else:
a = a.zero_extend(self._vector_size)
b = b.zero_extend(self._vector_size)
components.append((a + b)[self._vector_size:1])
return claripy.Concat(*components) | Halving add, for some ARM NEON instructions. |
def send(scope, data):
"""
Like exec(), but does not wait for a response of the remote host after
sending the command.
:type data: string
:param data: The data that is sent.
"""
conn = scope.get('__connection__')
for line in data:
conn.send(line)
return True | Like exec(), but does not wait for a response of the remote host after
sending the command.
:type data: string
:param data: The data that is sent. |
def get_date_type(calendar):
"""Return the cftime date type for a given calendar name."""
try:
import cftime
except ImportError:
raise ImportError(
'cftime is required for dates with non-standard calendars')
else:
calendars = {
'noleap': cftime.DatetimeNoLeap,
'360_day': cftime.Datetime360Day,
'365_day': cftime.DatetimeNoLeap,
'366_day': cftime.DatetimeAllLeap,
'gregorian': cftime.DatetimeGregorian,
'proleptic_gregorian': cftime.DatetimeProlepticGregorian,
'julian': cftime.DatetimeJulian,
'all_leap': cftime.DatetimeAllLeap,
'standard': cftime.DatetimeGregorian
}
return calendars[calendar] | Return the cftime date type for a given calendar name. |
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is offen slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network
'''
infoblox = _get_infoblox(**api_opts)
return infoblox.get_network(ipv4addr=ipv4addr, network=network, return_fields=return_fields) | Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is offen slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network |
def requestSubsystem(self, subsystem):
"""Request a subsystem and return a deferred reply.
"""
data = common.NS(subsystem)
return self.sendRequest('subsystem', data, wantReply=True) | Request a subsystem and return a deferred reply. |
def drop_dose(self):
"""
Drop the maximum dose and related response values.
"""
doses = np.array(self.individual_doses)
responses = np.array(self.responses)
mask = doses != doses.max()
self.individual_doses = doses[mask].tolist()
self.responses = responses[mask].tolist()
self.set_summary_data()
self._validate() | Drop the maximum dose and related response values. |
def DbExportDevice(self, argin):
""" Export a device to the database
:param argin: Str[0] = Device name
Str[1] = CORBA IOR
Str[2] = Device server process host name
Str[3] = Device server process PID or string ``null``
Str[4] = Device server process version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbExportDevice()")
if len(argin) < 5:
self.warn_stream("DataBase::DbExportDevice(): insufficient export info for device ")
th_exc(DB_IncorrectArguments,
"insufficient export info for device",
"DataBase::ExportDevice()")
dev_name, IOR, host, pid, version = argin[:5]
dev_name = dev_name.lower()
if pid.lower() == 'null':
pid = "-1"
self.db.export_device(dev_name, IOR, host, pid, version) | Export a device to the database
:param argin: Str[0] = Device name
Str[1] = CORBA IOR
Str[2] = Device server process host name
Str[3] = Device server process PID or string ``null``
Str[4] = Device server process version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid |
def feed(f, limit=25):
"""
Pull a feed
:param f: feed name (eg: csirtgadgetes/correlated)
:param limit: return value limit (default 25)
:return: Feed dict
"""
if '/' not in f:
raise ValueError('feed name must be formatted like: '
'csirtgadgets/scanners')
user, f = f.split('/')
return Feed().show(user, f, limit=limit) | Pull a feed
:param f: feed name (eg: csirtgadgetes/correlated)
:param limit: return value limit (default 25)
:return: Feed dict |
def is_identifier(s):
"""Check whether given string is a valid Python identifier.
Note that this excludes language keywords, even though they exhibit
a general form of an identifier. See also :func:`has_identifier_form`.
:param s: String to check
:return: Whether ``s`` is a valid Python identifier
"""
ensure_string(s)
if not IDENTIFIER_FORM_RE.match(s):
return False
if is_keyword(s):
return False
# ``None`` is not part of ``keyword.kwlist`` in Python 2.x,
# so we need to check for it explicitly
if s == 'None' and not IS_PY3:
return False
return True | Check whether given string is a valid Python identifier.
Note that this excludes language keywords, even though they exhibit
a general form of an identifier. See also :func:`has_identifier_form`.
:param s: String to check
:return: Whether ``s`` is a valid Python identifier |
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
psd_avg_method='median', low_freq_cutoff=30.,
threshold=50., cluster_window=5., corrupt_time=4.,
high_freq_cutoff=None, output_intermediates=False):
"""Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
high_frequency_cutoff : {float, None}
Maximum frequency to include in the whitened strain. If given, the
input series is downsampled accordingly. If omitted, the Nyquist
frequency is used.
output_intermediates : {bool, False}
Save intermediate time series for debugging.
"""
logging.info('Autogating: tapering strain')
taper_length = int(corrupt_time * strain.sample_rate)
w = numpy.arange(taper_length) / float(taper_length)
strain[0:taper_length] *= pycbc.types.Array(w, dtype=strain.dtype)
strain[(len(strain)-taper_length):] *= pycbc.types.Array(w[::-1],
dtype=strain.dtype)
# don't waste time trying to optimize a single FFT
pycbc.fft.fftw.set_measure_level(0)
if high_freq_cutoff:
logging.info('Autogating: downsampling strain')
strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
method='ldas')
if output_intermediates:
strain.save_to_wav('strain_conditioned.wav')
corrupt_length = int(corrupt_time * strain.sample_rate)
# zero-pad strain to a power-of-2 length
strain_pad_length = next_power_of_2(len(strain))
pad_start = strain_pad_length/2 - len(strain)/2
pad_end = pad_start + len(strain)
strain_pad = pycbc.types.TimeSeries(
pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
delta_t=strain.delta_t, copy=False,
epoch=strain.start_time-pad_start/strain.sample_rate)
strain_pad[pad_start:pad_end] = strain[:]
logging.info('Autogating: estimating PSD')
psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method,
require_exact_data_fit=False)
psd = pycbc.psd.interpolate(psd, 1./strain_pad.duration)
psd = pycbc.psd.inverse_spectrum_truncation(
psd, int(psd_duration * strain.sample_rate),
low_frequency_cutoff=low_freq_cutoff,
trunc_method='hann')
kmin = int(low_freq_cutoff / psd.delta_f)
psd[0:kmin] = numpy.inf
if high_freq_cutoff:
kmax = int(high_freq_cutoff / psd.delta_f)
psd[kmax:] = numpy.inf
logging.info('Autogating: time -> frequency')
strain_tilde = pycbc.types.FrequencySeries(
pycbc.types.zeros(len(strain_pad) / 2 + 1,
dtype=pycbc.types.complex_same_precision_as(strain)),
delta_f=psd.delta_f, copy=False)
pycbc.fft.fft(strain_pad, strain_tilde)
logging.info('Autogating: whitening')
if high_freq_cutoff:
norm = high_freq_cutoff - low_freq_cutoff
else:
norm = strain.sample_rate/2. - low_freq_cutoff
strain_tilde *= (psd * norm) ** (-0.5)
logging.info('Autogating: frequency -> time')
pycbc.fft.ifft(strain_tilde, strain_pad)
pycbc.fft.fftw.set_measure_level(pycbc.fft.fftw._default_measurelvl)
if output_intermediates:
strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
logging.info('Autogating: computing magnitude')
mag = abs(strain_pad[pad_start:pad_end])
if output_intermediates:
mag.save('strain_whitened_mag.npy')
mag = mag.numpy()
# remove strain corrupted by filters at the ends
mag[0:corrupt_length] = 0
mag[-1:-corrupt_length-1:-1] = 0
logging.info('Autogating: finding loud peaks')
indices = numpy.where(mag > threshold)[0]
cluster_idx = pycbc.events.findchirp_cluster_over_window(
indices, numpy.array(mag[indices]),
int(cluster_window*strain.sample_rate))
times = [idx * strain.delta_t + strain.start_time \
for idx in indices[cluster_idx]]
return times | Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
high_frequency_cutoff : {float, None}
Maximum frequency to include in the whitened strain. If given, the
input series is downsampled accordingly. If omitted, the Nyquist
frequency is used.
output_intermediates : {bool, False}
Save intermediate time series for debugging. |
def get_mnist_iter(args, kv):
"""
create data iterator with NDArrayIter
"""
(train_lbl, train_img) = read_data(
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
train = mx.io.NDArrayIter(
to4d(train_img), train_lbl, args.batch_size, shuffle=True)
val = mx.io.NDArrayIter(
to4d(val_img), val_lbl, args.batch_size)
return (train, val) | create data iterator with NDArrayIter |
def search_meta(self, attr, value=None, stronly=False):
""" Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta attribute to query. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list
"""
if stronly:
qry = self.ses.query(Symbol.name).join(SymbolMeta)
else:
qry = self.ses.query(Symbol).join(SymbolMeta)
crits = []
if value is None:
crits.append(SymbolMeta.attr == attr)
else:
if isinstance(value, str):
values = [value]
elif isinstance(value, (tuple, list)):
values = value
for v in values:
crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(value)))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()] | Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta attribute to query. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default True
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list |
def request_pdu(self):
""" Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU.
"""
if None in [self.address, self.value]:
# TODO Raise proper exception.
raise Exception
return struct.pack('>BHH', self.function_code, self.address,
self._value) | Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU. |
def atleast(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter):
"""
This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
clauses (or the new native atmost constraint) are stored.
"""
if encoding < 0 or encoding > 9:
raise(NoSuchEncodingError(encoding))
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# we are going to return this formula
ret = CNFPlus()
# Minicard's native representation is handled separately
if encoding == 9:
ret.atmosts, ret.nv = [([-l for l in lits], len(lits) - bound)], top_id
return ret
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
res = pycard.encode_atleast(lits, bound, top_id, encoding)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if res:
ret.clauses, ret.nv = res
return ret | This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
clauses (or the new native atmost constraint) are stored. |
def export_file(self, data_object, destination_directory=None,
destination_filename=None, retry=False,
export_metadata=False, export_raw_file=True):
"""Export a file from Loom to some file storage location.
Default destination_directory is cwd. Default destination_filename is the
filename from the file data object associated with the given file_id.
"""
if not destination_directory:
destination_directory = os.getcwd()
# We get filename from the dataobject
if not destination_filename:
destination_filename = data_object['value']['filename']
destination_file_url = os.path.join(destination_directory,
destination_filename)
logger.info('Exporting file %s@%s ...' % (
data_object['value']['filename'],
data_object['uuid']))
if export_raw_file:
destination = File(
destination_file_url, self.storage_settings, retry=retry)
if destination.exists():
raise FileAlreadyExistsError(
'File already exists at %s' % destination_file_url)
logger.info('...copying file to %s' % (
destination.get_url()))
# Copy from the first file location
file_resource = data_object.get('value')
md5 = file_resource.get('md5')
source_url = data_object['value']['file_url']
File(source_url, self.storage_settings, retry=retry).copy_to(
destination, expected_md5=md5)
data_object['value'] = self._create_new_file_resource(
data_object['value'], destination.get_url())
else:
logger.info('...skipping raw file')
if export_metadata:
data_object['value'].pop('link', None)
data_object['value'].pop('upload_status', None)
destination_metadata_url = os.path.join(
destination_file_url + '.metadata.yaml')
logger.info('...writing metadata to %s' % destination_metadata_url)
metadata = yaml.safe_dump(data_object, default_flow_style=False)
metadata_file = File(destination_metadata_url,
self.storage_settings, retry=retry)
metadata_file.write(metadata)
else:
logger.info('...skipping metadata')
logger.info('...finished file export') | Export a file from Loom to some file storage location.
Default destination_directory is cwd. Default destination_filename is the
filename from the file data object associated with the given file_id. |
def flip(self, axis=HORIZONTAL):
"""Flips the layer, either HORIZONTAL or VERTICAL.
"""
if axis == HORIZONTAL:
self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)
if axis == VERTICAL:
self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM) | Flips the layer, either HORIZONTAL or VERTICAL. |
def _add_meta_info(self, eopatch, request_params, service_type):
""" Adds any missing metadata info to EOPatch """
for param, eoparam in zip(['time', 'time_difference', 'maxcc'], ['time_interval', 'time_difference', 'maxcc']):
if eoparam not in eopatch.meta_info:
eopatch.meta_info[eoparam] = request_params[param]
if 'service_type' not in eopatch.meta_info:
eopatch.meta_info['service_type'] = service_type.value
for param in ['size_x', 'size_y']:
if param not in eopatch.meta_info:
eopatch.meta_info[param] = getattr(self, param)
if eopatch.bbox is None:
eopatch.bbox = request_params['bbox'] | Adds any missing metadata info to EOPatch |
def get_gitlab_project(self):
"""Get numerical GitLab Project ID.
Returns:
int: Project ID number.
Raises:
foremast.exceptions.GitLabApiError: GitLab responded with bad status
code.
"""
self.server = gitlab.Gitlab(GIT_URL, private_token=GITLAB_TOKEN, api_version=4)
project = self.server.projects.get(self.git_short)
if not project:
raise GitLabApiError('Could not get Project "{0}" from GitLab API.'.format(self.git_short))
self.project = project
return self.project | Get numerical GitLab Project ID.
Returns:
int: Project ID number.
Raises:
foremast.exceptions.GitLabApiError: GitLab responded with bad status
code. |
def _load(self, scale=0.001):
"""Load the OLCI relative spectral responses
"""
ncf = Dataset(self.path, 'r')
bandnum = OLCI_BAND_NAMES.index(self.bandname)
# cam = 0
# view = 0
# resp = ncf.variables[
# 'spectral_response_function'][bandnum, cam, view, :]
# wvl = ncf.variables[
# 'spectral_response_function_wavelength'][bandnum, cam, view, :] * scale
resp = ncf.variables[
'mean_spectral_response_function'][bandnum, :]
wvl = ncf.variables[
'mean_spectral_response_function_wavelength'][bandnum, :] * scale
self.rsr = {'wavelength': wvl, 'response': resp} | Load the OLCI relative spectral responses |
def MakeClass(descriptor):
"""Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor.
"""
if descriptor in MESSAGE_CLASS_CACHE:
return MESSAGE_CLASS_CACHE[descriptor]
attributes = {}
for name, nested_type in descriptor.nested_types_by_name.items():
attributes[name] = MakeClass(nested_type)
attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
result = GeneratedProtocolMessageType(
str(descriptor.name), (message.Message,), attributes)
MESSAGE_CLASS_CACHE[descriptor] = result
return result | Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor. |
def one_hot2indices(one_hots):
"""
Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1]
"""
indices = []
for one_hot in one_hots:
indices.append(argmax(one_hot))
return indices | Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1] |
def IsDir(directory):
'''
:param unicode directory:
A path
:rtype: bool
:returns:
Returns whether the given path points to an existent directory.
:raises NotImplementedProtocol:
If the path protocol is not local or ftp
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
if _UrlIsLocal(directory_url):
return os.path.isdir(directory)
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme) | :param unicode directory:
A path
:rtype: bool
:returns:
Returns whether the given path points to an existent directory.
:raises NotImplementedProtocol:
If the path protocol is not local or ftp
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information |
async def clean_up_clients_async(self):
"""
Resets the pump swallows all exceptions.
"""
if self.partition_receiver:
if self.eh_client:
await self.eh_client.stop_async()
self.partition_receiver = None
self.partition_receive_handler = None
self.eh_client = None | Resets the pump swallows all exceptions. |
def display(obj, detail='phrase'):
""" Friendly string for volume, using sink paths. """
try:
return obj.display(detail=detail)
except AttributeError:
return str(obj) | Friendly string for volume, using sink paths. |
def errors(self):
""" get all the errors
>>> gres = PlayMeta("operation")
>>> res_plus = BasicPlayMeta(Composable(name="plus"))
>>> gres.append(res_plus)
>>> res_plus.add_error(ValueError("invalid data"))
>>> res_moins = BasicPlayMeta(Composable(name="moins"))
>>> gres.append(res_moins)
>>> res_plus.add_error(RuntimeError("server not anwsering"))
>>> gres.errors
[ValueError('invalid data',), RuntimeError('server not anwsering',)]
"""
errors = []
for meta in self:
errors.extend(meta.errors)
return errors | get all the errors
>>> gres = PlayMeta("operation")
>>> res_plus = BasicPlayMeta(Composable(name="plus"))
>>> gres.append(res_plus)
>>> res_plus.add_error(ValueError("invalid data"))
>>> res_moins = BasicPlayMeta(Composable(name="moins"))
>>> gres.append(res_moins)
>>> res_plus.add_error(RuntimeError("server not anwsering"))
>>> gres.errors
[ValueError('invalid data',), RuntimeError('server not anwsering',)] |
def apply_op(input_layer, operation, *op_args, **op_kwargs):
"""Applies the given operation to this before without adding any summaries.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied.
"""
return input_layer.with_tensor(
operation(input_layer.tensor, *op_args, **op_kwargs)) | Applies the given operation to this before without adding any summaries.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied. |
def _enforce_txt_record_maxlen(key, value):
'''
Enforces the TXT record maximum length of 255 characters.
TXT record length includes key, value, and '='.
:param str key: Key of the TXT record
:param str value: Value of the TXT record
:rtype: str
:return: The value of the TXT record. It may be truncated if it exceeds
the maximum permitted length. In case of truncation, '...' is
appended to indicate that the entire value is not present.
'''
# Add 1 for '=' seperator between key and value
if len(key) + len(value) + 1 > 255:
# 255 - 3 ('...') - 1 ('=') = 251
return value[:251 - len(key)] + '...'
return value | Enforces the TXT record maximum length of 255 characters.
TXT record length includes key, value, and '='.
:param str key: Key of the TXT record
:param str value: Value of the TXT record
:rtype: str
:return: The value of the TXT record. It may be truncated if it exceeds
the maximum permitted length. In case of truncation, '...' is
appended to indicate that the entire value is not present. |
def parse_homer_findpeaks(self):
""" Find HOMER findpeaks logs and parse their data """
self.homer_findpeaks = dict()
for f in self.find_log_files('homer/findpeaks', filehandles=True):
self.parse_findPeaks(f)
# Filter to strip out ignored sample names
self.homer_findpeaks = self.ignore_samples(self.homer_findpeaks)
if len(self.homer_findpeaks) > 0:
# Write parsed report data to a file
self.write_data_file(self.homer_findpeaks, 'multiqc_homer_findpeaks')
# General Stats Table
stats_headers = OrderedDict()
stats_headers['approximate_ip_efficiency'] = {
'title': '% Efficiency',
'description': 'Approximate IP efficiency',
'min': 0,
'max': 100,
'suffix': '%',
'scale': 'RdYlGn'
}
stats_headers['total_peaks'] = {
'title': 'Total Peaks',
'min': 0,
'format': '{:,.0f}',
'scale': 'GnBu'
}
stats_headers['expected_tags_per_peak'] = {
'title': 'Tags/Peak',
'description': 'Expected tags per peak',
'min': 0,
'format': '{:,.0f}',
'scale': 'PuRd'
}
self.general_stats_addcols(self.homer_findpeaks, stats_headers, 'HOMER findpeaks')
return len(self.homer_findpeaks) | Find HOMER findpeaks logs and parse their data |
def seek_previous_line(self):
"""
Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found.
"""
where = self.file.tell()
offset = 0
while True:
if offset == where:
break
read_size = self.read_size if self.read_size <= where else where
self.file.seek(where - offset - read_size, SEEK_SET)
data_len, data = self.read(read_size)
# Consider the following example: Foo\r | \nBar where " | " denotes current position,
# '\nBar' is the read part and 'Foo\r' is the remaining part.
# We should completely consume terminator "\r\n" by reading one extra byte.
if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
terminator_where = self.file.tell()
if terminator_where > data_len + 1:
self.file.seek(where - offset - data_len - 1, SEEK_SET)
terminator_len, terminator_data = self.read(1)
if terminator_data[0] == b'\r'[0]:
data_len += 1
data = b'\r' + data
self.file.seek(terminator_where)
data_where = data_len
while data_where > 0:
terminator = self.suffix_line_terminator(data[:data_where])
if terminator and offset == 0 and data_where == data_len:
# The last character is a line terminator that finishes current line. Ignore it.
data_where -= len(terminator)
elif terminator:
self.file.seek(where - offset - (data_len - data_where))
return self.file.tell()
else:
data_where -= 1
offset += data_len
if where == 0:
# Nothing more to read.
return -1
else:
# Very first line.
self.file.seek(0)
return 0 | Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found. |
def is_active(self):
"""
Returns True when the start date is today or in the past and the
task has not yet been completed.
"""
start = self.start_date()
return not self.is_completed() and (not start or start <= date.today()) | Returns True when the start date is today or in the past and the
task has not yet been completed. |
def _generate_soma(self):
'''soma'''
radius = self._obj.soma.radius
return _square_segment(radius, (0., -radius)) | soma |
def _on_message(channel, method, header, body):
"""
Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body
"""
print "Message:"
print "\t%r" % method
print "\t%r" % header
print "\t%r" % body
# Acknowledge message receipt
channel.basic_ack(method.delivery_tag)
# when ready, stop consuming
channel.stop_consuming() | Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body |
def tail_disconnect(self, index):
"""Mark all devices disconnected except target in the chain."""
try:
for device in self.devices[index + 1:]:
device.connected = False
except IndexError:
pass | Mark all devices disconnected except target in the chain. |
def parseUrl(url):
"""Return a dict containing scheme, netloc, url, params, query, fragment keys.
query is a dict where the values are always lists. If the query key appears only
once in the URL, the list will have a single value.
"""
scheme, netloc, url, params, query, fragment = urllib.parse.urlparse(url)
query_dict = {
k: sorted(v) if len(v) > 1 else v[0]
for k, v in list(urllib.parse.parse_qs(query).items())
}
return {
'scheme': scheme,
'netloc': netloc,
'url': url,
'params': params,
'query': query_dict,
'fragment': fragment,
} | Return a dict containing scheme, netloc, url, params, query, fragment keys.
query is a dict where the values are always lists. If the query key appears only
once in the URL, the list will have a single value. |
def do_repository_work(self,
repo_name,
repo_tag=None,
docker_executable='docker',
password=None,
force=None,
loglevel=logging.DEBUG,
note=None,
tag=None,
push=None,
export=None,
save=None):
"""Commit, tag, push, tar a docker container based on the configuration we have.
@param repo_name: Name of the repository.
@param docker_executable: Defaults to 'docker'
@param password:
@param force:
@type repo_name: string
@type docker_executable: string
@type password: string
@type force: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# TODO: make host and client configurable
self.handle_note(note)
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
if tag is None:
tag = self.repository['tag']
if push is None:
push = self.repository['push']
if export is None:
export = self.repository['export']
if save is None:
save = self.repository['save']
if not (push or export or save or tag):
# If we're forcing this, then tag as a minimum
if force:
tag = True
else:
return True
shutit_pexpect_child = self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = self.expect_prompts['ORIGIN_ENV']
server = self.repository['server']
repo_user = self.repository['user']
if repo_tag is None:
repo_tag = self.repository['tag_name']
if repo_user and repo_name:
repository = '%s/%s' % (repo_user, repo_name)
repository_tar = '%s%s' % (repo_user, repo_name)
elif repo_user:
repository = repository_tar = repo_user
elif repo_name:
repository = repository_tar = repo_name
else:
repository = repository_tar = ''
if not repository:
self.fail('Could not form valid repository name', shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if (export or save) and not repository_tar:
self.fail('Could not form valid tar name', shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if server != '':
repository = '%s/%s' % (server, repository)
if self.build['deps_only']:
repo_tag += '_deps'
if self.repository['suffix_date']:
suffix_date = time.strftime(self.repository['suffix_format'])
repository = '%s%s' % (repository, suffix_date)
repository_tar = '%s%s' % (repository_tar, suffix_date)
if repository != '' and len(repository.split(':')) > 1:
repository_with_tag = repository
repo_tag = repository.split(':')[1]
elif repository != '':
repository_with_tag = repository + ':' + repo_tag
# Commit image
# Only lower case accepted
repository = repository.lower()
repository_with_tag = repository_with_tag.lower()
if server == '' and len(repository) > 30 and push:
self.fail("""repository name: '""" + repository + """' too long to push. If using suffix_date consider shortening, or consider adding "-s repository push no" to your arguments to prevent pushing.""", shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if self.send(docker_executable + ' commit ' + self.target['container_id'] + ' ' + repository_with_tag,
expect=[expect,' assword'],
shutit_pexpect_child=shutit_pexpect_child,
timeout=99999,
check_exit=False,
loglevel=loglevel) == 1:
self.send(self.host['password'],
expect=expect,
check_exit=False,
record_command=False,
shutit_pexpect_child=shutit_pexpect_child,
echo=False,
loglevel=loglevel)
# Tag image, force it by default
self.build['report'] += '\nBuild tagged as: ' + repository_with_tag
if export or save:
shutit_pexpect_session.pause_point('We are now exporting the container to a bzipped tar file, as configured in\n[repository]\ntar:yes', print_input=False, level=3)
if export:
bzfile = (repository_tar + 'export.tar.bz2')
self.log('Depositing bzip2 of exported container into ' + bzfile,level=logging.DEBUG)
if self.send(docker_executable + ' export ' + self.target['container_id'] + ' | bzip2 - > ' + bzfile,
expect=[expect, 'assword'],
timeout=99999,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel) == 1:
self.send(password,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel)
self.log('Deposited bzip2 of exported container into ' + bzfile, level=loglevel)
self.log('Run: bunzip2 -c ' + bzfile + ' | sudo docker import - to get this imported into docker.', level=logging.DEBUG)
self.build['report'] += ('\nDeposited bzip2 of exported container into ' + bzfile)
self.build['report'] += ('\nRun:\n\nbunzip2 -c ' + bzfile + ' | sudo docker import -\n\nto get this imported into docker.')
if save:
bzfile = (repository_tar + 'save.tar.bz2')
self.log('Depositing bzip2 of exported container into ' + bzfile,level=logging.DEBUG)
if self.send(docker_executable + ' save ' + self.target['container_id'] + ' | bzip2 - > ' + bzfile,
expect=[expect, 'assword'],
timeout=99999,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel) == 1:
self.send(password,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel)
self.log('Deposited bzip2 of exported container into ' + bzfile, level=logging.DEBUG)
self.log('Run: bunzip2 -c ' + bzfile + ' | sudo docker import - to get this imported into docker.', level=logging.DEBUG)
self.build['report'] += ('\nDeposited bzip2 of exported container into ' + bzfile)
self.build['report'] += ('\nRun:\n\nbunzip2 -c ' + bzfile + ' | sudo docker import -\n\nto get this imported into docker.')
if self.repository['push']:
# Pass the child explicitly as it's the host child.
self.push_repository(repository, docker_executable=docker_executable, expect=expect, shutit_pexpect_child=shutit_pexpect_child)
self.build['report'] = (self.build['report'] + '\nPushed repository: ' + repository)
self.handle_note_after(note)
return True | Commit, tag, push, tar a docker container based on the configuration we have.
@param repo_name: Name of the repository.
@param docker_executable: Defaults to 'docker'
@param password:
@param force:
@type repo_name: string
@type docker_executable: string
@type password: string
@type force: boolean |
def trigger(self, identifier, force=True):
"""Trigger an upgrade task."""
self.debug(identifier)
url = "{base}/{identifier}".format(
base=self.local_base_url,
identifier=identifier
)
param = {}
if force:
param['force'] = force
encode = urllib.urlencode(param)
if encode:
url += "?"
url += encode
return self.core.update(url, {}) | Trigger an upgrade task. |
def set_slippage(self, us_equities=None, us_futures=None):
"""Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures | Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel` |
def clean_up_datetime(obj_map):
"""convert datetime objects to dictionaries for storage"""
clean_map = {}
for key, value in obj_map.items():
if isinstance(value, datetime.datetime):
clean_map[key] = {
'year': value.year,
'month': value.month,
'day': value.day,
'hour': value.hour,
'minute': value.minute,
'second': value.second,
'microsecond': value.microsecond,
'tzinfo': value.tzinfo
}
elif isinstance(value, dict):
clean_map[key] = clean_up_datetime(value)
elif isinstance(value, list):
if key not in clean_map:
clean_map[key] = []
if len(value) > 0:
for index, list_value in enumerate(value):
if isinstance(list_value, dict):
clean_map[key].append(clean_up_datetime(list_value))
else:
clean_map[key].append(list_value)
else:
clean_map[key] = value
else:
clean_map[key] = value
return clean_map | convert datetime objects to dictionaries for storage |
def parse_0134_013b(v):
"""
Parses the O2 Sensor Value (0134 - 013B) and returns two values parsed from it:
1. Fuel-Air Equivalence [Ratio] as a float from 0 - 2
2. Current in [mA] as a float from -128 - 128
:param str v:
:return tuple of float, float:
"""
try:
trim_val = trim_obd_value(v)
val_ab = int(trim_val[0:2], 16)
val_cd = int(trim_val[2:4], 16)
return (2 / 65536) * val_ab, val_cd - 128
except ValueError:
return None, None | Parses the O2 Sensor Value (0134 - 013B) and returns two values parsed from it:
1. Fuel-Air Equivalence [Ratio] as a float from 0 - 2
2. Current in [mA] as a float from -128 - 128
:param str v:
:return tuple of float, float: |
def set_base_prompt(
self, pri_prompt_terminator=":", alt_prompt_terminator=">", delay_factor=2
):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
super(CoriantSSH, self).set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
return self.base_prompt | Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output. |
def apply_chords(im, spacing=1, axis=0, trim_edges=True, label=False):
r"""
Adds chords to the void space in the specified direction. The chords are
separated by 1 voxel plus the provided spacing.
Parameters
----------
im : ND-array
An image of the porous material with void marked as ``True``.
spacing : int
Separation between chords. The default is 1 voxel. This can be
decreased to 0, meaning that the chords all touch each other, which
automatically sets to the ``label`` argument to ``True``.
axis : int (default = 0)
The axis along which the chords are drawn.
trim_edges : bool (default = ``True``)
Whether or not to remove chords that touch the edges of the image.
These chords are artifically shortened, so skew the chord length
distribution.
label : bool (default is ``False``)
If ``True`` the chords in the returned image are each given a unique
label, such that all voxels lying on the same chord have the same
value. This is automatically set to ``True`` if spacing is 0, but is
``False`` otherwise.
Returns
-------
image : ND-array
A copy of ``im`` with non-zero values indicating the chords.
See Also
--------
apply_chords_3D
"""
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if spacing < 0:
raise Exception('Spacing cannot be less than 0')
if spacing == 0:
label = True
result = sp.zeros(im.shape, dtype=int) # Will receive chords at end
slxyz = [slice(None, None, spacing*(axis != i) + 1) for i in [0, 1, 2]]
slices = tuple(slxyz[:im.ndim])
s = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] # Straight-line structuring element
if im.ndim == 3: # Make structuring element 3D if necessary
s = sp.pad(sp.atleast_3d(s), pad_width=((0, 0), (0, 0), (1, 1)),
mode='constant', constant_values=0)
im = im[slices]
s = sp.swapaxes(s, 0, axis)
chords = spim.label(im, structure=s)[0]
if trim_edges: # Label on border chords will be set to 0
chords = clear_border(chords)
result[slices] = chords # Place chords into empty image created at top
if label is False: # Remove label if not requested
result = result > 0
return result | r"""
Adds chords to the void space in the specified direction. The chords are
separated by 1 voxel plus the provided spacing.
Parameters
----------
im : ND-array
An image of the porous material with void marked as ``True``.
spacing : int
Separation between chords. The default is 1 voxel. This can be
decreased to 0, meaning that the chords all touch each other, which
automatically sets to the ``label`` argument to ``True``.
axis : int (default = 0)
The axis along which the chords are drawn.
trim_edges : bool (default = ``True``)
Whether or not to remove chords that touch the edges of the image.
These chords are artifically shortened, so skew the chord length
distribution.
label : bool (default is ``False``)
If ``True`` the chords in the returned image are each given a unique
label, such that all voxels lying on the same chord have the same
value. This is automatically set to ``True`` if spacing is 0, but is
``False`` otherwise.
Returns
-------
image : ND-array
A copy of ``im`` with non-zero values indicating the chords.
See Also
--------
apply_chords_3D |
def parse_error(text: str) -> Any:
"""
Validate and parse the BMA answer from websocket
:param text: the bma error
:return: the json data
"""
try:
data = json.loads(text)
jsonschema.validate(data, ERROR_SCHEMA)
except (TypeError, json.decoder.JSONDecodeError) as e:
raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e)))
return data | Validate and parse the BMA answer from websocket
:param text: the bma error
:return: the json data |
def login(self, email, password):
"""
:password: user password md5 digest
"""
payload = {
'account': email,
'password': password
}
code, msg, rv = self.request(
'mtop.alimusic.xuser.facade.xiamiuserservice.login',
payload
)
if code == 'SUCCESS':
# TODO: 保存 refreshToken 和过期时间等更多信息
# 根据目前观察,token 过期时间有三年
accessToken = rv['data']['data']['accessToken']
self.set_access_token(accessToken)
return rv | :password: user password md5 digest |
def clear_worker_output(self):
"""Drops all of the worker output collections"""
print 'Dropping all of the worker output collections... Whee!'
# Get all the collections in the workbench database
all_c = self.database.collection_names()
# Remove collections that we don't want to cap
try:
all_c.remove('system.indexes')
all_c.remove('fs.chunks')
all_c.remove('fs.files')
all_c.remove('sample_set')
all_c.remove('tags')
all_c.remove(self.sample_collection)
except ValueError:
print 'Catching a benign exception thats expected...'
for collection in all_c:
self.database.drop_collection(collection) | Drops all of the worker output collections |
def multi_packages(self, logins=None, platform=None, package_type=None,
type_=None, access=None):
"""Return the private packages for a given set of usernames/logins."""
logger.debug('')
method = self._multi_packages
new_client = True
try:
# Only the newer versions have extra keywords like `access`
self._anaconda_client_api.user_packages(access='private')
except Exception:
new_client = False
return self._create_worker(method, logins=logins,
platform=platform,
package_type=package_type,
type_=type_, access=access,
new_client=new_client) | Return the private packages for a given set of usernames/logins. |
def _to_ned(self):
"""
Switches the reference frame to NED
"""
if self.ref_frame is 'USE':
# Rotate
return utils.use_to_ned(self.tensor), \
utils.use_to_ned(self.tensor_sigma)
elif self.ref_frame is 'NED':
# Alreadt NED
return self.tensor, self.tensor_sigma
else:
raise ValueError('Reference frame %s not recognised - cannot '
'transform to NED!' % self.ref_frame) | Switches the reference frame to NED |
def find_numeration(line):
"""Given a reference line, attempt to locate instances of citation
'numeration' in the line.
@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
and possibly recognized/marked-up.
"""
patterns = (
# vol,page,year
re_numeration_vol_page_yr,
re_numeration_vol_nucphys_page_yr,
re_numeration_nucphys_vol_page_yr,
# With sub volume
re_numeration_vol_subvol_nucphys_yr_page,
re_numeration_vol_nucphys_yr_subvol_page,
# vol,year,page
re_numeration_vol_yr_page,
re_numeration_nucphys_vol_yr_page,
re_numeration_vol_nucphys_series_yr_page,
# vol,page,year
re_numeration_vol_series_nucphys_page_yr,
re_numeration_vol_nucphys_series_page_yr,
# year,vol,page
re_numeration_yr_vol_page,
)
for pattern in patterns:
match = pattern.match(line)
if match:
info = match.groupdict()
series = info.get('series', None)
if not series:
series = extract_series_from_volume(info['vol'])
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt']
if not info['vol_num']:
info['vol_num'] = info['vol_num_alt2']
return {'year': info.get('year', None),
'series': series,
'volume': info['vol_num'],
'page': info['page'] or info['jinst_page'],
'page_end': info['page_end'],
'len': match.end()}
return None | Given a reference line, attempt to locate instances of citation
'numeration' in the line.
@param line: (string) the reference line.
@return: (string) the reference line after numeration has been checked
and possibly recognized/marked-up. |
def _shape_union(shapes):
"""A shape containing the union of all dimensions in the input shapes.
Args:
shapes: a list of Shapes
Returns:
a Shape
"""
return Shape(sorted(list(set(sum([s.dims for s in shapes], []))))) | A shape containing the union of all dimensions in the input shapes.
Args:
shapes: a list of Shapes
Returns:
a Shape |
def group(self, labels):
""" group as list """
unique_labels, groupxs = self.group_indicies(labels)
groups = [self.take(idxs) for idxs in groupxs]
return unique_labels, groups | group as list |
def create(self, obj, ref=None):
""" Convert *obj* to a new ShaderObject. If the output is a Variable
with no name, then set its name using *ref*.
"""
if isinstance(ref, Variable):
ref = ref.name
elif isinstance(ref, string_types) and ref.startswith('gl_'):
# gl_ names not allowed for variables
ref = ref[3:].lower()
# Allow any type of object to be converted to ShaderObject if it
# provides a magic method:
if hasattr(obj, '_shader_object'):
obj = obj._shader_object()
if isinstance(obj, ShaderObject):
if isinstance(obj, Variable) and obj.name is None:
obj.name = ref
elif isinstance(obj, string_types):
obj = TextExpression(obj)
else:
obj = Variable(ref, obj)
# Try prepending the name to indicate attribute, uniform, varying
if obj.vtype and obj.vtype[0] in 'auv':
obj.name = obj.vtype[0] + '_' + obj.name
return obj | Convert *obj* to a new ShaderObject. If the output is a Variable
with no name, then set its name using *ref*. |
def reloading_meta_metaclass_factory(BASE_TYPE=type):
""" hack for pyqt """
class ReloadingMetaclass2(BASE_TYPE):
def __init__(metaself, name, bases, dct):
super(ReloadingMetaclass2, metaself).__init__(name, bases, dct)
#print('Making rrr for %r' % (name,))
metaself.rrr = reload_class
return ReloadingMetaclass2 | hack for pyqt |
def add_remote_subnet(self, context_id, subnet_id):
"""Adds a remote subnet to a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the remote subnet.
:return bool: True if remote subnet addition was successful.
"""
return self.context.addCustomerSubnetToNetworkTunnel(subnet_id,
id=context_id) | Adds a remote subnet to a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int subnet_id: The id-value representing the remote subnet.
:return bool: True if remote subnet addition was successful. |
def config_create(self, kernel=None, label=None, devices=[], disks=[],
volumes=[], **kwargs):
"""
Creates a Linode Config with the given attributes.
:param kernel: The kernel to boot with.
:param label: The config label
:param disks: The list of disks, starting at sda, to map to this config.
:param volumes: The volumes, starting after the last disk, to map to this
config
:param devices: A list of devices to assign to this config, in device
index order. Values must be of type Disk or Volume. If this is
given, you may not include disks or volumes.
:param **kwargs: Any other arguments accepted by the api.
:returns: A new Linode Config
"""
from .volume import Volume
hypervisor_prefix = 'sd' if self.hypervisor == 'kvm' else 'xvd'
device_names = [hypervisor_prefix + string.ascii_lowercase[i] for i in range(0, 8)]
device_map = {device_names[i]: None for i in range(0, len(device_names))}
if devices and (disks or volumes):
raise ValueError('You may not call config_create with "devices" and '
'either of "disks" or "volumes" specified!')
if not devices:
if not isinstance(disks, list):
disks = [disks]
if not isinstance(volumes, list):
volumes = [volumes]
devices = []
for d in disks:
if d is None:
devices.append(None)
elif isinstance(d, Disk):
devices.append(d)
else:
devices.append(Disk(self._client, int(d), self.id))
for v in volumes:
if v is None:
devices.append(None)
elif isinstance(v, Volume):
devices.append(v)
else:
devices.append(Volume(self._client, int(v)))
if not devices:
raise ValueError('Must include at least one disk or volume!')
for i, d in enumerate(devices):
if d is None:
pass
elif isinstance(d, Disk):
device_map[device_names[i]] = {'disk_id': d.id }
elif isinstance(d, Volume):
device_map[device_names[i]] = {'volume_id': d.id }
else:
raise TypeError('Disk or Volume expected!')
params = {
'kernel': kernel.id if issubclass(type(kernel), Base) else kernel,
'label': label if label else "{}_config_{}".format(self.label, len(self.configs)),
'devices': device_map,
}
params.update(kwargs)
result = self._client.post("{}/configs".format(Instance.api_endpoint), model=self, data=params)
self.invalidate()
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response creating config!', json=result)
c = Config(self._client, result['id'], self.id, result)
return c | Creates a Linode Config with the given attributes.
:param kernel: The kernel to boot with.
:param label: The config label
:param disks: The list of disks, starting at sda, to map to this config.
:param volumes: The volumes, starting after the last disk, to map to this
config
:param devices: A list of devices to assign to this config, in device
index order. Values must be of type Disk or Volume. If this is
given, you may not include disks or volumes.
:param **kwargs: Any other arguments accepted by the api.
:returns: A new Linode Config |
def get_content_type(obj, field_name=False):
"""
Returns the content type of an object.
:param obj: A model instance.
:param field_name: Field of the object to return.
"""
content_type = ContentType.objects.get_for_model(obj)
if field_name:
return getattr(content_type, field_name, '')
return content_type | Returns the content type of an object.
:param obj: A model instance.
:param field_name: Field of the object to return. |
def compile_file(env, src_path, dst_path, encoding='utf-8', base_dir=''):
"""Compiles a Jinja2 template to python code.
:param env: a Jinja2 Environment instance.
:param src_path: path to the source file.
:param dst_path: path to the destination file.
:param encoding: template encoding.
:param base_dir: the base path to be removed from the compiled template filename.
"""
src_file = file(src_path, 'r')
source = src_file.read().decode(encoding)
name = src_path.replace(base_dir, '')
raw = env.compile(source, name=name, filename=name, raw=True)
src_file.close()
dst_file = open(dst_path, 'w')
dst_file.write(raw)
dst_file.close() | Compiles a Jinja2 template to python code.
:param env: a Jinja2 Environment instance.
:param src_path: path to the source file.
:param dst_path: path to the destination file.
:param encoding: template encoding.
:param base_dir: the base path to be removed from the compiled template filename. |
def _parse_labels(self, labels, element):
r"""
This private method is used for converting \'labels\' to a proper
format, including dealing with wildcards (\*).
Parameters
----------
labels : string or list of strings
The label or list of labels to be parsed. Note that the \* can be
used as a wildcard.
Returns
-------
A list of label strings, with all wildcard matches included if
applicable.
"""
if labels is None:
raise Exception('Labels cannot be None')
if type(labels) is str:
labels = [labels]
# Parse the labels list
parsed_labels = []
for label in labels:
# Remove element from label, if present
if element in label:
label = label.split('.')[-1]
# Deal with wildcards
if '*' in label:
Ls = [L.split('.')[-1] for L in self.labels(element=element)]
if label.startswith('*'):
temp = [L for L in Ls if L.endswith(label.strip('*'))]
if label.endswith('*'):
temp = [L for L in Ls if L.startswith(label.strip('*'))]
temp = [element+'.'+L for L in temp]
elif element+'.'+label in self.keys():
temp = [element+'.'+label]
else:
temp = [element+'.'+label]
parsed_labels.extend(temp)
# Remove duplicates if any
[parsed_labels.remove(L) for L in parsed_labels
if parsed_labels.count(L) > 1]
return parsed_labels | r"""
This private method is used for converting \'labels\' to a proper
format, including dealing with wildcards (\*).
Parameters
----------
labels : string or list of strings
The label or list of labels to be parsed. Note that the \* can be
used as a wildcard.
Returns
-------
A list of label strings, with all wildcard matches included if
applicable. |
def check_directory_path(self, path):
"""
Ensure directory exists at the provided path
:type path: string
:param path: path to directory to check
"""
if os.path.isdir(path) is not True:
msg = "Directory Does Not Exist {}".format(path)
raise OSError(msg) | Ensure directory exists at the provided path
:type path: string
:param path: path to directory to check |
def _fastfood_list(args):
"""Run on `fastfood list`."""
template_pack = pack.TemplatePack(args.template_pack)
if args.stencil_set:
stencil_set = template_pack.load_stencil_set(args.stencil_set)
print("Available Stencils for %s:" % args.stencil_set)
for stencil in stencil_set.stencils:
print(" %s" % stencil)
else:
print('Available Stencil Sets:')
for name, vals in template_pack.stencil_sets.items():
print(" %12s - %12s" % (name, vals['help'])) | Run on `fastfood list`. |
def get_section_header(self, section):
"""
Get a specific section header by index or name.
Args:
section(int or str): The index or name of the section header to return.
Returns:
:class:`~ELF.SectionHeader`: The section header.
Raises:
KeyError: The requested section header does not exist.
"""
self._ensure_section_headers_loaded()
if type(section) is int:
return self._section_headers_by_index[section]
else:
return self._section_headers_by_name[section] | Get a specific section header by index or name.
Args:
section(int or str): The index or name of the section header to return.
Returns:
:class:`~ELF.SectionHeader`: The section header.
Raises:
KeyError: The requested section header does not exist. |
def upload(identifier, files,
metadata=None,
headers=None,
access_key=None,
secret_key=None,
queue_derive=None,
verbose=None,
verify=None,
checksum=None,
delete=None,
retries=None,
retries_sleep=None,
debug=None,
request_kwargs=None,
**get_item_kwargs):
"""Upload files to an item. The item will be created if it does not exist.
:type identifier: str
:param identifier: The globally unique Archive.org identifier for a given item.
:param files: The filepaths or file-like objects to upload. This value can be an
iterable or a single file-like object or string.
:type metadata: dict
:param metadata: (optional) Metadata used to create a new item. If the item already
exists, the metadata will not be updated -- use ``modify_metadata``.
:type headers: dict
:param headers: (optional) Add additional HTTP headers to the request.
:type access_key: str
:param access_key: (optional) IA-S3 access_key to use when making the given request.
:type secret_key: str
:param secret_key: (optional) IA-S3 secret_key to use when making the given request.
:type queue_derive: bool
:param queue_derive: (optional) Set to False to prevent an item from being derived
after upload.
:type verbose: bool
:param verbose: (optional) Display upload progress.
:type verify: bool
:param verify: (optional) Verify local MD5 checksum matches the MD5 checksum of the
file received by IAS3.
:type checksum: bool
:param checksum: (optional) Skip uploading files based on checksum.
:type delete: bool
:param delete: (optional) Delete local file after the upload has been successfully
verified.
:type retries: int
:param retries: (optional) Number of times to retry the given request if S3 returns a
503 SlowDown error.
:type retries_sleep: int
:param retries_sleep: (optional) Amount of time to sleep between ``retries``.
:type debug: bool
:param debug: (optional) Set to True to print headers to stdout, and exit without
sending the upload request.
:param \*\*kwargs: Optional arguments that ``get_item`` takes.
:returns: A list of :py:class:`requests.Response` objects.
"""
item = get_item(identifier, **get_item_kwargs)
return item.upload(files,
metadata=metadata,
headers=headers,
access_key=access_key,
secret_key=secret_key,
queue_derive=queue_derive,
verbose=verbose,
verify=verify,
checksum=checksum,
delete=delete,
retries=retries,
retries_sleep=retries_sleep,
debug=debug,
request_kwargs=request_kwargs) | Upload files to an item. The item will be created if it does not exist.
:type identifier: str
:param identifier: The globally unique Archive.org identifier for a given item.
:param files: The filepaths or file-like objects to upload. This value can be an
iterable or a single file-like object or string.
:type metadata: dict
:param metadata: (optional) Metadata used to create a new item. If the item already
exists, the metadata will not be updated -- use ``modify_metadata``.
:type headers: dict
:param headers: (optional) Add additional HTTP headers to the request.
:type access_key: str
:param access_key: (optional) IA-S3 access_key to use when making the given request.
:type secret_key: str
:param secret_key: (optional) IA-S3 secret_key to use when making the given request.
:type queue_derive: bool
:param queue_derive: (optional) Set to False to prevent an item from being derived
after upload.
:type verbose: bool
:param verbose: (optional) Display upload progress.
:type verify: bool
:param verify: (optional) Verify local MD5 checksum matches the MD5 checksum of the
file received by IAS3.
:type checksum: bool
:param checksum: (optional) Skip uploading files based on checksum.
:type delete: bool
:param delete: (optional) Delete local file after the upload has been successfully
verified.
:type retries: int
:param retries: (optional) Number of times to retry the given request if S3 returns a
503 SlowDown error.
:type retries_sleep: int
:param retries_sleep: (optional) Amount of time to sleep between ``retries``.
:type debug: bool
:param debug: (optional) Set to True to print headers to stdout, and exit without
sending the upload request.
:param \*\*kwargs: Optional arguments that ``get_item`` takes.
:returns: A list of :py:class:`requests.Response` objects. |
def register_service_agreement_consumer(storage_path, publisher_address, agreement_id, did,
service_agreement, service_definition_id, price,
encrypted_files, consumer_account, condition_ids,
consume_callback=None, start_time=None):
"""
Registers the given service agreement in the local storage.
Subscribes to the service agreement events.
:param storage_path: storage path for the internal db, str
:param publisher_address: ethereum account address of publisher, hex str
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param encrypted_files: resutl of the files encrypted by the secret store, hex str
:param consumer_account: Account instance of the consumer
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param consume_callback:
:param start_time: start time, int
"""
if start_time is None:
start_time = int(datetime.now().timestamp())
record_service_agreement(
storage_path, agreement_id, did, service_definition_id, price, encrypted_files, start_time)
process_agreement_events_consumer(
publisher_address, agreement_id, did, service_agreement,
price, consumer_account, condition_ids,
consume_callback
) | Registers the given service agreement in the local storage.
Subscribes to the service agreement events.
:param storage_path: storage path for the internal db, str
:param publisher_address: ethereum account address of publisher, hex str
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param encrypted_files: resutl of the files encrypted by the secret store, hex str
:param consumer_account: Account instance of the consumer
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param consume_callback:
:param start_time: start time, int |
def do_delete(endpoint, access_token):
'''Do an HTTP GET request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.delete(endpoint, headers=headers) | Do an HTTP GET request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. |
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = b''
try:
while self.rawq:
# Handle non-IAC first (normal data).
char = self.rawq_getchar()
if char != IAC:
buf = buf + char
continue
# Interpret the command byte that follows after the IAC code.
command = self.rawq_getchar()
if command == theNULL:
self.msg('IAC NOP')
continue
elif command == IAC:
self.msg('IAC DATA')
buf = buf + command
continue
# DO: Indicates the request that the other party perform,
# or confirmation that you are expecting the other party
# to perform, the indicated option.
elif command == DO:
opt = self.rawq_getchar()
self.msg('IAC DO %s', ord(opt))
if opt == TTYPE:
self.sock.send(IAC+WILL+opt)
elif opt == NAWS:
self.sock.send(IAC+WILL+opt)
self.can_naws = True
if self.window_size:
self.set_window_size(*self.window_size)
else:
self.sock.send(IAC+WONT+opt)
# DON'T: Indicates the demand that the other party stop
# performing, or confirmation that you are no longer
# expecting the other party to perform, the indicated
# option.
elif command == DONT:
opt = self.rawq_getchar()
self.msg('IAC DONT %s', ord(opt))
self.sock.send(IAC+WONT+opt)
# SB: Indicates that what follows is subnegotiation of the
# indicated option.
elif command == SB:
opt = self.rawq_getchar()
self.msg('IAC SUBCOMMAND %d', ord(opt))
# We only handle the TTYPE command, so skip all other
# commands.
if opt != TTYPE:
while self.rawq_getchar() != SE:
pass
continue
# We also only handle the SEND_TTYPE option of TTYPE,
# so skip everything else.
subopt = self.rawq_getchar()
if subopt != SEND_TTYPE:
while self.rawq_getchar() != SE:
pass
continue
# Mandatory end of the IAC subcommand.
iac = self.rawq_getchar()
end = self.rawq_getchar()
if (iac, end) != (IAC, SE):
# whoops, that's an unexpected response...
self.msg(
'expected IAC SE, but got %d %d', ord(iac), ord(end))
self.msg('IAC SUBCOMMAND_END')
# Send the next supported terminal.
ttype = self.termtype.encode('latin1')
self.msg('indicating support for terminal type %s', ttype)
self.sock.send(IAC+SB+TTYPE+theNULL+ttype+IAC+SE)
elif command in (WILL, WONT):
opt = self.rawq_getchar()
self.msg('IAC %s %d',
command == WILL and 'WILL' or 'WONT', ord(opt))
if opt == ECHO:
self.sock.send(IAC+DO+opt)
else:
self.sock.send(IAC+DONT+opt)
else:
self.msg('IAC %d not recognized' % ord(command))
except EOFError: # raised by self.rawq_getchar()
pass
buf = buf.decode(self.encoding)
self.cookedq.write(buf)
if self.data_callback is not None:
self.data_callback(buf, **self.data_callback_kwargs) | Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence. |
def refresh(self):
"""Refresh the current figure
"""
for cbar in self.colorbars:
cbar.draw_all()
self.canvas.draw() | Refresh the current figure |
def parse_python_file(filepath):
"""
Retrieves the AST of the specified file.
This function performs simple caching so that the same file isn't read or
parsed more than once per process.
:param filepath: the file to parse
:type filepath: str
:returns: ast.AST
"""
with _AST_CACHE_LOCK:
if filepath not in _AST_CACHE:
source = read_file(filepath)
_AST_CACHE[filepath] = ast.parse(source, filename=filepath)
return _AST_CACHE[filepath] | Retrieves the AST of the specified file.
This function performs simple caching so that the same file isn't read or
parsed more than once per process.
:param filepath: the file to parse
:type filepath: str
:returns: ast.AST |
def make_translations(unique_name, node):
'''
Compute and store the title that should be displayed
when linking to a given unique_name, eg in python
when linking to test_greeter_greet() we want to display
Test.Greeter.greet
'''
introspectable = not node.attrib.get('introspectable') == '0'
if node.tag == core_ns('member'):
__TRANSLATED_NAMES['c'][unique_name] = unique_name
if introspectable:
components = get_gi_name_components(node)
components[-1] = components[-1].upper()
gi_name = '.'.join(components)
__TRANSLATED_NAMES['python'][unique_name] = gi_name
__TRANSLATED_NAMES['javascript'][unique_name] = gi_name
elif c_ns('identifier') in node.attrib:
__TRANSLATED_NAMES['c'][unique_name] = unique_name
if introspectable:
components = get_gi_name_components(node)
gi_name = '.'.join(components)
__TRANSLATED_NAMES['python'][unique_name] = gi_name
components[-1] = 'prototype.%s' % components[-1]
__TRANSLATED_NAMES['javascript'][unique_name] = '.'.join(components)
elif c_ns('type') in node.attrib:
components = get_gi_name_components(node)
gi_name = '.'.join(components)
__TRANSLATED_NAMES['c'][unique_name] = unique_name
if introspectable:
__TRANSLATED_NAMES['javascript'][unique_name] = gi_name
__TRANSLATED_NAMES['python'][unique_name] = gi_name
elif node.tag == core_ns('field'):
components = []
get_field_c_name_components(node, components)
display_name = '.'.join(components[1:])
__TRANSLATED_NAMES['c'][unique_name] = display_name
if introspectable:
__TRANSLATED_NAMES['javascript'][unique_name] = display_name
__TRANSLATED_NAMES['python'][unique_name] = display_name
elif node.tag == core_ns('virtual-method'):
display_name = node.attrib['name']
__TRANSLATED_NAMES['c'][unique_name] = display_name
if introspectable:
__TRANSLATED_NAMES['javascript'][unique_name] = 'vfunc_%s' % display_name
__TRANSLATED_NAMES['python'][unique_name] = 'do_%s' % display_name
elif node.tag == core_ns('property'):
display_name = node.attrib['name']
__TRANSLATED_NAMES['c'][unique_name] = display_name
if introspectable:
__TRANSLATED_NAMES['javascript'][unique_name] = display_name
__TRANSLATED_NAMES['python'][unique_name] = display_name.replace('-', '_')
else:
__TRANSLATED_NAMES['c'][unique_name] = node.attrib.get('name')
if introspectable:
__TRANSLATED_NAMES['python'][unique_name] = node.attrib.get('name')
__TRANSLATED_NAMES['javascript'][unique_name] = node.attrib.get('name') | Compute and store the title that should be displayed
when linking to a given unique_name, eg in python
when linking to test_greeter_greet() we want to display
Test.Greeter.greet |
def get(self, identity):
"""
Constructs a SyncMapPermissionContext
:param identity: Identity of the user to whom the Sync Map Permission applies.
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext
"""
return SyncMapPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=identity,
) | Constructs a SyncMapPermissionContext
:param identity: Identity of the user to whom the Sync Map Permission applies.
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext |
def read( handle, id=None ):
"""
Reads a structure via PDBParser.
Simplifies life..
"""
from Bio.PDB import PDBParser
if not id:
id = os.path.basename(handle).split('.')[0] # Get from filename
p = PDBParser()
s = p.get_structure(id, handle)
return s | Reads a structure via PDBParser.
Simplifies life.. |
def spell_check(T, w):
"""Spellchecker
:param T: trie encoding the dictionary
:param w: given word
:returns: a closest word from the dictionary
:complexity: linear if distance was constant
"""
assert T is not None
dist = 0
while True: # Try increasing distances
u = search(T, dist, w)
if u is not None:
return u
dist += 1 | Spellchecker
:param T: trie encoding the dictionary
:param w: given word
:returns: a closest word from the dictionary
:complexity: linear if distance was constant |
def feature(self,feat=None,searchforit=False,init=None):
"""
Returns value of self.feats[feat].
If searchforit==True, will search in this object's children recursively.
If not found, returns None.
"""
if feat==None:
return self.feats
if not init:
init=self
init.tick=0
init._matches=[]
feat=feat.strip()
if feat.startswith("+"):
init._eval=True
feat=feat[1:]
elif feat.startswith("-"):
init._eval=False
feat=feat[1:]
else:
init._eval=None
if (hasattr(self,'feats')) and (feat in self.feats):
if type(self.feats[feat]) == type([]):
if len(self.feats[feat]) > 1:
return self.feats[feat]
else:
return self.feats[feat][0]
else:
return self.feats[feat]
else:
if searchforit:
for child in self.descendants():
init.tick+=1
x=child.feature(feat,searchforit,init)
#print init.tick, self.classname(), child.classname(), x
if x==None: continue
init._matches.append ( (child,x) )
#return [child.feature(feat,searchforit) for child in self.descendants()]
else:
return None
#if searchforit:
# return self.search(SearchTerm(feat))
#else:
# None
if self==init:
if init._eval==None:
return init._matches
else:
return [ x for (x,y) in init._matches if bool(y)==init._eval ] | Returns value of self.feats[feat].
If searchforit==True, will search in this object's children recursively.
If not found, returns None. |
def do_fontsave(self, arg):
"""Saves the session variables to a file so that the same analysis can be continued later."""
#We need to save the full paths to the staging directories for the tests that are loaded
#so far; then when the session is restored, we can reparse the results.
from os import path
import json
fullpath = path.expanduser(arg)
data = {
"fonts": self.curargs["fonts"],
"ticks": self.curargs["ticks"]
}
with open(fullpath, 'w') as f:
json.dump(data, f)
msg.okay("Saved current font settings to {}".format(fullpath)) | Saves the session variables to a file so that the same analysis can be continued later. |
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name | Dump the name to string, after normalizing it. |
def AnalizarXml(self, xml=""):
"Analiza un mensaje XML (por defecto la respuesta)"
try:
if not xml:
xml = self.XmlResponse
self.xml = SimpleXMLElement(xml)
return True
except Exception, e:
self.Excepcion = u"%s" % (e)
return False | Analiza un mensaje XML (por defecto la respuesta) |
def diag_jacobian(xs,
ys=None,
sample_shape=None,
fn=None,
parallel_iterations=10,
name=None):
"""Computes diagonal of the Jacobian matrix of `ys=fn(xs)` wrt `xs`.
If `ys` is a tensor or a list of tensors of the form `(ys_1, .., ys_n)` and
`xs` is of the form `(xs_1, .., xs_n)`, the function `jacobians_diag`
computes the diagonal of the Jacobian matrix, i.e., the partial derivatives
`(dys_1/dxs_1,.., dys_n/dxs_n`). For definition details, see
https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant
#### Example
##### Diagonal Hessian of the log-density of a 3D Gaussian distribution
In this example we sample from a standard univariate normal
distribution using MALA with `step_size` equal to 0.75.
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
tfd = tfp.distributions
dtype = np.float32
with tf.Session(graph=tf.Graph()) as sess:
true_mean = dtype([0, 0, 0])
true_cov = dtype([[1, 0.25, 0.25], [0.25, 2, 0.25], [0.25, 0.25, 3]])
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of tensors `x` and `y`.
# Then the target function is defined as follows:
def target_fn(x, y):
# Stack the input tensors together
z = tf.concat([x, y], axis=-1) - true_mean
return target.log_prob(z)
sample_shape = [3, 5]
state = [tf.ones(sample_shape + [2], dtype=dtype),
tf.ones(sample_shape + [1], dtype=dtype)]
fn_val, grads = tfp.math.value_and_gradient(target_fn, state)
# We can either pass the `sample_shape` of the `state` or not, which impacts
# computational speed of `diag_jacobian`
_, diag_jacobian_shape_passed = diag_jacobian(
xs=state, ys=grads, sample_shape=tf.shape(fn_val))
_, diag_jacobian_shape_none = diag_jacobian(
xs=state, ys=grads)
diag_jacobian_shape_passed_ = sess.run(diag_jacobian_shape_passed)
diag_jacobian_shape_none_ = sess.run(diag_jacobian_shape_none)
print('hessian computed through `diag_jacobian`, sample_shape passed: ',
np.concatenate(diag_jacobian_shape_passed_, -1))
print('hessian computed through `diag_jacobian`, sample_shape skipped',
np.concatenate(diag_jacobian_shape_none_, -1))
```
Args:
xs: `Tensor` or a python `list` of `Tensors` of real-like dtypes and shapes
`sample_shape` + `event_shape_i`, where `event_shape_i` can be different
for different tensors.
ys: `Tensor` or a python `list` of `Tensors` of the same dtype as `xs`. Must
broadcast with the shape of `xs`. Can be omitted if `fn` is provided.
sample_shape: A common `sample_shape` of the input tensors of `xs`. If not,
provided, assumed to be `[1]`, which may result in a slow performance of
`jacobians_diag`.
fn: Python callable that takes `xs` as an argument (or `*xs`, if it is a
list) and returns `ys`. Might be skipped if `ys` is provided and
`tf.enable_eager_execution()` is disabled.
parallel_iterations: `int` that specifies the allowed number of coordinates
of the input tensor `xs`, for which the partial derivatives `dys_i/dxs_i`
can be computed in parallel.
name: Python `str` name prefixed to `Ops` created by this function.
Default value: `None` (i.e., "diag_jacobian").
Returns:
ys: a list, which coincides with the input `ys`, when provided.
If the input `ys` is None, `fn(*xs)` gets computed and returned as a list.
jacobians_diag_res: a `Tensor` or a Python list of `Tensor`s of the same
dtypes and shapes as the input `xs`. This is the diagonal of the Jacobian
of ys wrt xs.
Raises:
ValueError: if lists `xs` and `ys` have different length or both `ys` and
`fn` are `None`, or `fn` is None in the eager execution mode.
"""
with tf.compat.v1.name_scope(name, 'jacobians_diag', [xs, ys]):
if sample_shape is None:
sample_shape = [1]
# Output Jacobian diagonal
jacobians_diag_res = []
# Convert input `xs` to a list
xs = list(xs) if _is_list_like(xs) else [xs]
xs = [tf.convert_to_tensor(value=x) for x in xs]
if not tf.executing_eagerly():
if ys is None:
if fn is None:
raise ValueError('Both `ys` and `fn` can not be `None`')
else:
ys = fn(*xs)
# Convert ys to a list
ys = list(ys) if _is_list_like(ys) else [ys]
if len(xs) != len(ys):
raise ValueError('`xs` and `ys` should have the same length')
for y, x in zip(ys, xs):
# Broadcast `y` to the shape of `x`.
y_ = y + tf.zeros_like(x)
# Change `event_shape` to one-dimension
y_ = tf.reshape(y, tf.concat([sample_shape, [-1]], -1))
# Declare an iterator and tensor array loop variables for the gradients.
n = tf.size(input=x) / tf.cast(
tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
n = tf.cast(n, dtype=tf.int32)
loop_vars = [
0,
tf.TensorArray(x.dtype, n)
]
def loop_body(j):
"""Loop function to compute gradients of the each direction."""
# Gradient along direction `j`.
res = tf.gradients(ys=y_[..., j], xs=x)[0] # pylint: disable=cell-var-from-loop
if res is None:
# Return zero, if the gradient is `None`.
res = tf.zeros(tf.concat([sample_shape, [1]], -1),
dtype=x.dtype) # pylint: disable=cell-var-from-loop
else:
# Reshape `event_shape` to 1D
res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1))
# Add artificial dimension for the case of zero shape input tensor
res = tf.expand_dims(res, 0)
res = res[..., j]
return res # pylint: disable=cell-var-from-loop
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, jacobian_diag_res = tf.while_loop(
cond=lambda j, _: j < n, # pylint: disable=cell-var-from-loop
body=lambda j, result: (j + 1, result.write(j, loop_body(j))),
loop_vars=loop_vars,
parallel_iterations=parallel_iterations)
shape_x = tf.shape(input=x)
# Stack gradients together and move flattened `event_shape` to the
# zero position
reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
# Reshape to the original tensor
reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
jacobians_diag_res.append(reshaped_jacobian_diag)
else:
if fn is None:
raise ValueError('`fn` can not be `None` when eager execution is '
'enabled')
if ys is None:
ys = fn(*xs)
def fn_slice(i, j):
"""Broadcast y[i], flatten event shape of y[i], return y[i][..., j]."""
def fn_broadcast(*state):
res = fn(*state)
res = list(res) if _is_list_like(res) else [res]
if len(res) != len(state):
res *= len(state)
res = [tf.reshape(r + tf.zeros_like(s),
tf.concat([sample_shape, [-1]], -1))
for r, s in zip(res, state)]
return res
# Expand dimensions before returning in order to support 0D input `xs`
return lambda *state: tf.expand_dims(fn_broadcast(*state)[i], 0)[..., j]
def make_loop_body(i, x):
"""Loop function to compute gradients of the each direction."""
def _fn(j, result):
res = value_and_gradient(fn_slice(i, j), xs)[1][i]
if res is None:
res = tf.zeros(tf.concat([sample_shape, [1]], -1), dtype=x.dtype)
else:
res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1))
res = res[..., j]
return j + 1, result.write(j, res)
return _fn
for i, x in enumerate(xs):
# Declare an iterator and tensor array loop variables for the gradients.
n = tf.size(input=x) / tf.cast(
tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
n = tf.cast(n, dtype=tf.int32)
loop_vars = [
0,
tf.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, jacobian_diag_res = tf.while_loop(
cond=lambda j, _: j < n,
body=make_loop_body(i, x),
loop_vars=loop_vars,
parallel_iterations=parallel_iterations)
shape_x = tf.shape(input=x)
# Stack gradients together and move flattened `event_shape` to the
# zero position
reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
# Reshape to the original tensor
reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
jacobians_diag_res.append(reshaped_jacobian_diag)
return ys, jacobians_diag_res | Computes diagonal of the Jacobian matrix of `ys=fn(xs)` wrt `xs`.
If `ys` is a tensor or a list of tensors of the form `(ys_1, .., ys_n)` and
`xs` is of the form `(xs_1, .., xs_n)`, the function `jacobians_diag`
computes the diagonal of the Jacobian matrix, i.e., the partial derivatives
`(dys_1/dxs_1,.., dys_n/dxs_n`). For definition details, see
https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant
#### Example
##### Diagonal Hessian of the log-density of a 3D Gaussian distribution
In this example we sample from a standard univariate normal
distribution using MALA with `step_size` equal to 0.75.
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
tfd = tfp.distributions
dtype = np.float32
with tf.Session(graph=tf.Graph()) as sess:
true_mean = dtype([0, 0, 0])
true_cov = dtype([[1, 0.25, 0.25], [0.25, 2, 0.25], [0.25, 0.25, 3]])
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of tensors `x` and `y`.
# Then the target function is defined as follows:
def target_fn(x, y):
# Stack the input tensors together
z = tf.concat([x, y], axis=-1) - true_mean
return target.log_prob(z)
sample_shape = [3, 5]
state = [tf.ones(sample_shape + [2], dtype=dtype),
tf.ones(sample_shape + [1], dtype=dtype)]
fn_val, grads = tfp.math.value_and_gradient(target_fn, state)
# We can either pass the `sample_shape` of the `state` or not, which impacts
# computational speed of `diag_jacobian`
_, diag_jacobian_shape_passed = diag_jacobian(
xs=state, ys=grads, sample_shape=tf.shape(fn_val))
_, diag_jacobian_shape_none = diag_jacobian(
xs=state, ys=grads)
diag_jacobian_shape_passed_ = sess.run(diag_jacobian_shape_passed)
diag_jacobian_shape_none_ = sess.run(diag_jacobian_shape_none)
print('hessian computed through `diag_jacobian`, sample_shape passed: ',
np.concatenate(diag_jacobian_shape_passed_, -1))
print('hessian computed through `diag_jacobian`, sample_shape skipped',
np.concatenate(diag_jacobian_shape_none_, -1))
```
Args:
xs: `Tensor` or a python `list` of `Tensors` of real-like dtypes and shapes
`sample_shape` + `event_shape_i`, where `event_shape_i` can be different
for different tensors.
ys: `Tensor` or a python `list` of `Tensors` of the same dtype as `xs`. Must
broadcast with the shape of `xs`. Can be omitted if `fn` is provided.
sample_shape: A common `sample_shape` of the input tensors of `xs`. If not,
provided, assumed to be `[1]`, which may result in a slow performance of
`jacobians_diag`.
fn: Python callable that takes `xs` as an argument (or `*xs`, if it is a
list) and returns `ys`. Might be skipped if `ys` is provided and
`tf.enable_eager_execution()` is disabled.
parallel_iterations: `int` that specifies the allowed number of coordinates
of the input tensor `xs`, for which the partial derivatives `dys_i/dxs_i`
can be computed in parallel.
name: Python `str` name prefixed to `Ops` created by this function.
Default value: `None` (i.e., "diag_jacobian").
Returns:
ys: a list, which coincides with the input `ys`, when provided.
If the input `ys` is None, `fn(*xs)` gets computed and returned as a list.
jacobians_diag_res: a `Tensor` or a Python list of `Tensor`s of the same
dtypes and shapes as the input `xs`. This is the diagonal of the Jacobian
of ys wrt xs.
Raises:
ValueError: if lists `xs` and `ys` have different length or both `ys` and
`fn` are `None`, or `fn` is None in the eager execution mode. |
def _requirements_sanitize(req_list):
# type: (List[str]) -> List[str]
"""
Cleanup a list of requirement strings (e.g. from requirements.txt) to only
contain entries valid for this platform and with the lowest required version
only.
Example
-------
>>> from sys import version_info
>>> _requirements_sanitize([
... 'foo>=3.0',
... "monotonic>=1.0,>0.1;python_version=='2.4'",
... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
... ])
['foo >= 3.0', 'bar > 1.0']
"""
filtered_req_list = (
_requirement_find_lowest_possible(req) for req in
(pkg_resources.Requirement.parse(s) for s in req_list)
if _requirement_filter_by_marker(req)
)
return [" ".join(req) for req in filtered_req_list] | Cleanup a list of requirement strings (e.g. from requirements.txt) to only
contain entries valid for this platform and with the lowest required version
only.
Example
-------
>>> from sys import version_info
>>> _requirements_sanitize([
... 'foo>=3.0',
... "monotonic>=1.0,>0.1;python_version=='2.4'",
... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
... ])
['foo >= 3.0', 'bar > 1.0'] |
def parse(inp, format=None, encoding='utf-8', force_types=True):
"""Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing or inp
"""
proper_inp = inp
if hasattr(inp, 'read'):
proper_inp = inp.read()
# if proper_inp is unicode, encode it
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
# try to guess markup type
fname = None
if hasattr(inp, 'name'):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
# make it look like file-like bytes-yielding object
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types)
except Exception as e:
# I wish there was only Python 3 and I could just use "raise ... from e"
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res | Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing or inp |
def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
"""
KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string
"""
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
'''
salt must be a byte string, not %s
''',
type_name(salt)
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
'''
iterations must be an integer, not %s
''',
type_name(iterations)
))
if iterations < 1:
raise ValueError(pretty_message(
'''
iterations must be greater than 0 - is %s
''',
repr(iterations)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 1:
raise ValueError(pretty_message(
'''
key_length must be greater than 0 - is %s
''',
repr(key_length)
))
if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", not %s
''',
repr(hash_algorithm)
))
if id_ not in set([1, 2, 3]):
raise ValueError(pretty_message(
'''
id_ must be one of 1, 2, 3, not %s
''',
repr(id_)
))
utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'
algo = getattr(hashlib, hash_algorithm)
# u and v values are bytes (not bits as in the RFC)
u = {
'md5': 16,
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
if hash_algorithm in ['sha384', 'sha512']:
v = 128
else:
v = 64
# Step 1
d = chr_cls(id_) * v
# Step 2
s = b''
if salt != b'':
s_len = v * int(math.ceil(float(len(salt)) / v))
while len(s) < s_len:
s += salt
s = s[0:s_len]
# Step 3
p = b''
if utf16_password != b'':
p_len = v * int(math.ceil(float(len(utf16_password)) / v))
while len(p) < p_len:
p += utf16_password
p = p[0:p_len]
# Step 4
i = s + p
# Step 5
c = int(math.ceil(float(key_length) / u))
a = b'\x00' * (c * u)
for num in range(1, c + 1):
# Step 6A
a2 = algo(d + i).digest()
for _ in range(2, iterations + 1):
a2 = algo(a2).digest()
if num < c:
# Step 6B
b = b''
while len(b) < v:
b += a2
b = int_from_bytes(b[0:v]) + 1
# Step 6C
for num2 in range(0, len(i) // v):
start = num2 * v
end = (num2 + 1) * v
i_num2 = i[start:end]
i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)
# Ensure the new slice is the right size
i_num2_l = len(i_num2)
if i_num2_l > v:
i_num2 = i_num2[i_num2_l - v:]
i = i[0:start] + i_num2 + i[end:]
# Step 7 (one peice at a time)
begin = (num - 1) * u
to_copy = min(key_length, u)
a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]
return a[0:key_length] | KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string |
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating a new one if necessary.
Args:
defaults (dict): Used when we create a new object. Must map to fields
of the model.
\*\*kwargs: Used both for filtering and new object creation.
Returns:
A tuple of (object, created), where created is a boolean variable
specifies whether the object was newly created or not.
Example:
In the following example, *code* and *name* fields are used to query the DB.
.. code-block:: python
obj, is_new = Permission.objects.get_or_create({'description': desc},
code=code, name=name)
{description: desc} dict is just for new creations. If we can't find any
records by filtering on *code* and *name*, then we create a new object by
using all of the inputs.
"""
try:
return self.get(**kwargs), False
except ObjectDoesNotExist:
pass
data = defaults or {}
data.update(kwargs)
return self._model_class(**data).blocking_save(), True | Looks up an object with the given kwargs, creating a new one if necessary.
Args:
defaults (dict): Used when we create a new object. Must map to fields
of the model.
\*\*kwargs: Used both for filtering and new object creation.
Returns:
A tuple of (object, created), where created is a boolean variable
specifies whether the object was newly created or not.
Example:
In the following example, *code* and *name* fields are used to query the DB.
.. code-block:: python
obj, is_new = Permission.objects.get_or_create({'description': desc},
code=code, name=name)
{description: desc} dict is just for new creations. If we can't find any
records by filtering on *code* and *name*, then we create a new object by
using all of the inputs. |
def density_2d(self, x, y, rho0, Rs, center_x=0, center_y=0):
"""
projected density
:param x:
:param y:
:param rho0:
:param a:
:param s:
:param center_x:
:param center_y:
:return:
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
X = r/Rs
sigma0 = self.rho2sigma(rho0, Rs)
if isinstance(X, int) or isinstance(X, float):
if X == 1:
X = 1.000001
else:
X[X == 1] = 1.000001
sigma = sigma0 / (X**2-1)**2 * (-3 + (2+X**2)*self._F(X))
return sigma | projected density
:param x:
:param y:
:param rho0:
:param a:
:param s:
:param center_x:
:param center_y:
:return: |
def cmd_cminv(self, ch=None):
"""cminv ch=chname
Invert the color map in the channel/viewer
"""
viewer = self.get_viewer(ch)
if viewer is None:
self.log("No current viewer/channel.")
return
viewer.invert_cmap() | cminv ch=chname
Invert the color map in the channel/viewer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.