code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def serve(config):
    """Run the application under a gevent WSGI server.

    :param config: configuration object forwarded to ``make_app``;
        HOST / PORT are read from the resulting app's config
        (defaulting to 127.0.0.1:5000).
    """
    from gevent.pywsgi import WSGIServer

    application = make_app(config=config)
    bind_host = application.config.get("HOST", '127.0.0.1')
    bind_port = application.config.get("PORT", 5000)
    server = WSGIServer((bind_host, bind_port), application)
    server.serve_forever()
def conn_is_open(conn):
    """Return True if *conn* is a usable sqlite3 connection, else False."""
    if conn is None:
        return False
    try:
        # Any statement on a closed connection raises
        # sqlite3.ProgrammingError; use a cheap metadata query as probe.
        get_table_names(conn)
    except sqlite3.ProgrammingError:
        return False
    return True
def getTopRight(self):
    """
    Retrieves a tuple with the x,y coordinates of the upper right point
    of the rect.

    Requires the coordinates, width, height to be numbers (anything
    convertible to float).
    """
    right_edge = float(self.get_x()) + float(self.get_width())
    top_edge = float(self.get_y()) + float(self.get_height())
    return (right_edge, top_edge)
Requires the coordinates, width, height to be numbers |
def send_headers(self):
    """Assert, process, and send the HTTP response message-headers.

    You must set self.status, and self.outheaders before calling this.
    """
    # Lower-cased names of headers already queued by the handler.
    hkeys = [key.lower() for key, value in self.outheaders]
    # Numeric status code from the status line, e.g. "200 OK" -> 200.
    status = int(self.status[:3])
    if status == 413:
        # Request Entity Too Large. Close conn to avoid garbage.
        self.close_connection = True
    elif "content-length" not in hkeys:
        # "All 1xx (informational), 204 (no content),
        # and 304 (not modified) responses MUST NOT
        # include a message-body." So no point chunking.
        if status < 200 or status in (204, 205, 304):
            pass
        else:
            if (self.response_protocol == 'HTTP/1.1'
                    and self.method != 'HEAD'):
                # Use the chunked transfer-coding
                self.chunked_write = True
                self.outheaders.append(("Transfer-Encoding", "chunked"))
            else:
                # Closing the conn is the only way to determine len.
                self.close_connection = True
    if "connection" not in hkeys:
        if self.response_protocol == 'HTTP/1.1':
            # Both server and client are HTTP/1.1 or better
            if self.close_connection:
                self.outheaders.append(("Connection", "close"))
        else:
            # Server and/or client are HTTP/1.0
            if not self.close_connection:
                self.outheaders.append(("Connection", "Keep-Alive"))
    if (not self.close_connection) and (not self.chunked_read):
        # Read any remaining request body data on the socket.
        # "If an origin server receives a request that does not include an
        # Expect request-header field with the "100-continue" expectation,
        # the request includes a request body, and the server responds
        # with a final status code before reading the entire request body
        # from the transport connection, then the server SHOULD NOT close
        # the transport connection until it has read the entire request,
        # or until the client closes the connection. Otherwise, the client
        # might not reliably receive the response message. However, this
        # requirement is not be construed as preventing a server from
        # defending itself against denial-of-service attacks, or from
        # badly broken client implementations."
        remaining = getattr(self.rfile, 'remaining', 0)
        if remaining > 0:
            self.rfile.read(remaining)
    if "date" not in hkeys:
        self.outheaders.append(("Date", rfc822.formatdate()))
    if "server" not in hkeys:
        self.outheaders.append(("Server", self.server.server_name))
    # Serialize the status line plus all headers and write them in one go.
    buf = [self.server.protocol + " " + self.status + CRLF]
    for k, v in self.outheaders:
        buf.append(k + ": " + v + CRLF)
    buf.append(CRLF)
    self.conn.wfile.sendall("".join(buf))
You must set self.status, and self.outheaders before calling this. |
def _init_params(self, amplitude, length_scale, validate_args):
    """Shared init logic for `amplitude` and `length_scale` params.

    Args:
      amplitude: `Tensor` (or convertible) or `None` to convert, validate.
      length_scale: `Tensor` (or convertible) or `None` to convert, validate.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.

    Returns:
      dtype: The common `DType` of the parameters.
    """
    # Derive a single dtype both parameters can share.
    dtype = util.maybe_get_common_dtype(
        [amplitude, length_scale])
    # NOTE(review): when a parameter is None, its corresponding attribute
    # (self._amplitude / self._length_scale) is left unset here --
    # presumably assigned elsewhere; confirm against the class __init__.
    if amplitude is not None:
        amplitude = tf.convert_to_tensor(
            value=amplitude, name='amplitude', dtype=dtype)
        # Positivity is asserted only when validate_args is True.
        self._amplitude = _validate_arg_if_not_none(
            amplitude, tf.compat.v1.assert_positive, validate_args)
    if length_scale is not None:
        length_scale = tf.convert_to_tensor(
            value=length_scale, name='length_scale', dtype=dtype)
        self._length_scale = _validate_arg_if_not_none(
            length_scale, tf.compat.v1.assert_positive, validate_args)
    return dtype
Args:
amplitude: `Tensor` (or convertible) or `None` to convert, validate.
length_scale: `Tensor` (or convertible) or `None` to convert, validate.
validate_args: If `True`, parameters are checked for validity despite
possibly degrading runtime performance
Returns:
dtype: The common `DType` of the parameters. |
def set_secure_boot_mode(irmc_info, enable):
    """Enable/Disable secure boot on the server.

    :param irmc_info: node info
    :param enable: True, if secure boot needs to be enabled for next
        boot, else False.
    """
    security_config = {'SecureBootControlEnabled': enable}
    bios_config = {
        '@Version': '1.01',
        'SecurityConfig': security_config,
    }
    bios_config_data = {
        'Server': {
            '@Version': '1.01',
            'SystemConfig': {
                'BiosConfig': bios_config,
            },
        }
    }
    restore_bios_config(irmc_info=irmc_info, bios_config=bios_config_data)
:param irmc_info: node info
:param enable: True, if secure boot needs to be
enabled for next boot, else False. |
def free_symbols(self):
    """Set of all free symbols"""
    bound = self.bound_symbols
    # Keep only the term's symbols that are not bound in this scope.
    return {sym for sym in self.term.free_symbols if sym not in bound}
def _register(self, defaults=None, **kwargs):
    """Fetch (update or create) an instance, lazily.

    We're doing this lazily, so that it becomes possible to define
    custom enums in your code, even before the Django ORM is fully
    initialized.

    Domain.objects.SHOPPING = Domain.objects.register(
        ref='shopping',
        name='Webshop')
    Domain.objects.USERS = Domain.objects.register(
        ref='users',
        name='User Accounts')
    """
    def _fetch():
        # update_or_create returns (obj, created); only the obj matters.
        return self.update_or_create(defaults=defaults, **kwargs)[0]

    entry = SimpleLazyObject(_fetch)
    self._lazy_entries.append(entry)
    return entry
We're doing this lazily, so that it becomes possible to define
custom enums in your code, even before the Django ORM is fully
initialized.
Domain.objects.SHOPPING = Domain.objects.register(
ref='shopping',
name='Webshop')
Domain.objects.USERS = Domain.objects.register(
ref='users',
name='User Accounts') |
def fit(self, X, y=None, **kwargs):
    """
    The fit method is the primary drawing input for the
    visualization since it has both the X and y data required for the
    viz and the transform method does not.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    kwargs : dict
        Pass generic arguments to the drawing method

    Returns
    -------
    self : instance
        Returns the instance of the transformer/visualizer
    """
    # Delegate the statistical fit to the parent visualizer first.
    super(DataVisualizer, self).fit(X, y, **kwargs)
    # Store the classes for the legend if they're None.
    if self.classes_ is None:
        # TODO: Is this the most efficient method?
        self.classes_ = [str(label) for label in np.unique(y)]
    # Draw the instances
    self.draw(X, y, **kwargs)
    # Fit always returns self.
    return self
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer |
def from_data(cls, data):
    """Create gyroscope stream from data array

    Parameters
    -------------------
    data : (N, 3) ndarray
        Data array of angular velocities (rad/s)

    Returns
    -------------------
    GyroStream
        Stream object
    """
    # Each sample must be a 3-axis angular velocity.
    if data.shape[1] != 3:
        raise ValueError("Gyroscope data must have shape (N, 3)")
    stream = cls()
    stream.data = data
    return stream
Parameters
-------------------
data : (N, 3) ndarray
Data array of angular velocities (rad/s)
Returns
-------------------
GyroStream
Stream object |
def initialize(self, config, context):
    """Implements Pulsar Spout's initialize method.

    Reads service URL, topic, reliability-mode and deserializer settings
    from ``config``, then creates a Pulsar client and subscribes to the
    topic with a failover consumer.
    """
    self.logger.info("Initializing PulsarSpout with the following")
    self.logger.info("Component-specific config: \n%s" % str(config))
    self.logger.info("Context: \n%s" % str(context))
    # Running counters for emitted / acked / failed tuples.
    self.emit_count = 0
    self.ack_count = 0
    self.fail_count = 0
    # Both settings are mandatory; log fatally when either is missing.
    if not PulsarSpout.serviceUrl in config or not PulsarSpout.topicName in config:
        self.logger.fatal("Need to specify both serviceUrl and topicName")
    self.pulsar_cluster = str(config[PulsarSpout.serviceUrl])
    self.topic = str(config[PulsarSpout.topicName])
    mode = config[api_constants.TOPOLOGY_RELIABILITY_MODE]
    if mode == api_constants.TopologyReliabilityMode.ATLEAST_ONCE:
        # Convert the topology message timeout from seconds to ms.
        self.acking_timeout = 1000 * int(config[api_constants.TOPOLOGY_MESSAGE_TIMEOUT_SECS])
    else:
        # Default unacked-message timeout (ms) for other modes.
        self.acking_timeout = 30000
    if PulsarSpout.receiveTimeoutMs in config:
        self.receive_timeout_ms = config[PulsarSpout.receiveTimeoutMs]
    else:
        self.receive_timeout_ms = 10
    if PulsarSpout.deserializer in config:
        self.deserializer = config[PulsarSpout.deserializer]
        if not callable(self.deserializer):
            self.logger.fatal("Pulsar Message Deserializer needs to be callable")
    else:
        self.deserializer = self.default_deserializer
    # First generate the config
    self.logConfFileName = GenerateLogConfig(context)
    self.logger.info("Generated LogConf at %s" % self.logConfFileName)
    # We currently use the high level consumer API
    # For supporting effectively once, we will need to switch
    # to using lower level Reader API, when it becomes
    # available in python
    self.client = pulsar.Client(self.pulsar_cluster, log_conf_file_path=self.logConfFileName)
    self.logger.info("Setup Client with cluster %s" % self.pulsar_cluster)
    try:
        self.consumer = self.client.subscribe(self.topic, context.get_topology_name(),
                                              consumer_type=pulsar.ConsumerType.Failover,
                                              unacked_messages_timeout_ms=self.acking_timeout)
    except Exception as e:
        self.logger.fatal("Pulsar client subscription failed: %s" % str(e))
    self.logger.info("Subscribed to topic %s" % self.topic)
def _set_session_ldp_stats(self, v, load=False):
    """
    Setter method for session_ldp_stats, mapped from YANG variable
    /mpls_state/ldp/ldp_session/session_ldp_stats (container).

    If this variable is read-only (config: false) in the
    source YANG file, then _set_session_ldp_stats is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_session_ldp_stats() directly.

    YANG Description: Session LDP stats
    """
    # Unwrap YANG-typed wrappers back to their underlying value first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated container class.
        t = YANGDynClass(v,base=session_ldp_stats.session_ldp_stats, is_container='container', presence=False, yang_name="session-ldp-stats", rest_name="session-ldp-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-session-ldp-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """session_ldp_stats must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=session_ldp_stats.session_ldp_stats, is_container='container', presence=False, yang_name="session-ldp-stats", rest_name="session-ldp-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-session-ldp-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__session_ldp_stats = t
    # Propagate the change to the parent object when supported.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_session_ldp_stats is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_ldp_stats() directly.
YANG Description: Session LDP stats |
def _sb_short_word(self, term, r1_prefixes=None):
"""Return True iff term is a short word.
(...according to the Porter2 specification.)
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
bool
True iff term is a short word
"""
if self._sb_r1(term, r1_prefixes) == len(
term
) and self._sb_ends_in_short_syllable(term):
return True
return False | Return True iff term is a short word.
(...according to the Porter2 specification.)
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
bool
True iff term is a short word |
def facts(puppet=False):
    '''
    Run facter and return the results

    CLI Example:

    .. code-block:: bash

        salt '*' puppet.facts
    '''
    opt_puppet = '--puppet' if puppet else ''
    cmd_ret = __salt__['cmd.run_all']('facter {0}'.format(opt_puppet))
    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(cmd_ret['stderr'])
    # facter emits one fact per line; parse each into a key/value pair,
    # skipping blank lines and anything _format_fact cannot parse.
    ret = {}
    for line in cmd_ret['stdout'].splitlines():
        if not line:
            continue
        fact, value = _format_fact(line)
        if not fact:
            continue
        ret[fact] = value
    return ret
CLI Example:
.. code-block:: bash
salt '*' puppet.facts |
def query_obj(self):
    """Returns the query object for this visualization"""
    query = super().query_obj()
    query['row_limit'] = self.form_data.get(
        'row_limit', int(config.get('VIZ_ROW_LIMIT')))
    numeric_columns = self.form_data.get('all_columns_x')
    if numeric_columns is None:
        raise Exception(_('Must have at least one numeric column specified'))
    self.columns = numeric_columns
    query['columns'] = numeric_columns + self.groupby
    # override groupby entry to avoid aggregation
    query['groupby'] = []
    return query
def config(_config=None, **kwargs):
    """
    A decorator for setting the default kwargs of `BaseHandler.crawl`.
    Any self.crawl with this callback will use this config.

    :param _config: optional dict of crawl defaults; it is copied, so the
        caller's dict is never mutated or aliased by decorated functions.
    :param kwargs: extra defaults, overriding entries in ``_config``.
    :returns: decorator that attaches the merged dict as ``func._config``.
    """
    # BUG FIX: the original updated the caller's dict in place, so passing
    # the same dict to several decorators mutated and shared it. Copy first.
    merged = dict(_config) if _config else {}
    merged.update(kwargs)

    def wrapper(func):
        func._config = merged
        return func
    return wrapper
Any self.crawl with this callback will use this config. |
def request_token():
    """
    Request a token using the account name and password; returns a dict:
    {
        "user_info": {
            "ck": "-VQY",
            "play_record": {
                "fav_chls_count": 4,
                "liked": 802,
                "banned": 162,
                "played": 28368
            },
            "is_new_user": 0,
            "uid": "taizilongxu",
            "third_party_info": null,
            "url": "http://www.douban.com/people/taizilongxu/",
            "is_dj": false,
            "id": "2053207",
            "is_pro": false,
            "name": "..."
        },
        "r": 0
    }
    Loops until the login succeeds (r == 0), printing the error message
    otherwise.
    """
    while True:
        # Prompt for credentials and captcha answer.
        email, password, captcha_solution, captcha_id = win_login()
        options = {
            'source': 'radio',
            'alias': email,
            'form_password': password,
            'captcha_solution': captcha_solution,
            'captcha_id': captcha_id,
            'task': 'sync_channel_list'
        }
        r = requests.post('https://douban.fm/j/login', data=options, headers=HEADERS)
        req_json = json.loads(r.text, object_hook=decode_dict)
        # req_json = json.loads(r.text)
        if req_json['r'] == 0:
            post_data = {
                # will not save
                'liked': req_json['user_info']['play_record']['liked'],
                'banned': req_json['user_info']['play_record']['banned'],
                'played': req_json['user_info']['play_record']['played'],
                'is_pro': req_json['user_info']['is_pro'],
                'user_name': req_json['user_info']['name'],
                # to save
                'cookies': r.cookies,
                'valume': 50,
                'channel': 0,
                'theme_id': 0
            }
            return post_data
        # NOTE(review): err_msg is printed twice below -- presumably one of
        # these two lines was meant to be removed; confirm intended output.
        print(req_json['err_msg'])
        print(ERROR + req_json['err_msg'])
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
} |
def modelSetCompleted(self, modelID, completionReason, completionMsg,
                      cpuTime=0, useConnectionID=True):
    """ Mark a model as completed, with the given completionReason and
    completionMsg. This will fail if the model does not currently belong to this
    client (connection_id doesn't match).

    Parameters:
    ----------------------------------------------------------------
    modelID:          model ID of model to modify
    completionReason: completionReason string
    completionMsg:    completionMsg string
    cpuTime:          amount of CPU time spent on this model
    useConnectionID:  True if the connection id of the calling function
                      must be the same as the connection that created the
                      job. Set to True for hypersearch workers, which use
                      this mechanism for orphaned model detection.

    Raises:
        InvalidConnectionException: if no row was updated (model owned by
        another worker, or modelID not found).
    """
    if completionMsg is None:
        completionMsg = ''
    # Parameterized UPDATE; '%%s' survives the table-name substitution
    # below as the '%s' placeholders the DB driver expects.
    query = 'UPDATE %s SET status=%%s, ' \
            ' completion_reason=%%s, ' \
            ' completion_msg=%%s, ' \
            ' end_time=UTC_TIMESTAMP(), ' \
            ' cpu_time=%%s, ' \
            ' _eng_last_update_time=UTC_TIMESTAMP(), ' \
            ' update_counter=update_counter+1 ' \
            ' WHERE model_id=%%s' \
            % (self.modelsTableName,)
    sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
                 cpuTime, modelID]
    if useConnectionID:
        # Restrict the update to rows owned by this worker's connection.
        query += " AND _eng_worker_conn_id=%s"
        sqlParams.append(self._connectionID)
    with ConnectionFactory.get() as conn:
        numRowsAffected = conn.cursor.execute(query, sqlParams)
    # Exactly one row must have matched; anything else means ownership
    # was lost or the model does not exist.
    if numRowsAffected != 1:
        raise InvalidConnectionException(
            ("Tried to set modelID=%r using connectionID=%r, but this model "
             "belongs to some other worker or modelID not found; "
             "numRowsAffected=%r") % (modelID, self._connectionID, numRowsAffected))
completionMsg. This will fail if the model does not currently belong to this
client (connection_id doesn't match).
Parameters:
----------------------------------------------------------------
modelID: model ID of model to modify
completionReason: completionReason string
completionMsg: completionMsg string
cpuTime: amount of CPU time spent on this model
useConnectionID: True if the connection id of the calling function
must be the same as the connection that created the
job. Set to True for hypersearch workers, which use
this mechanism for orphaned model detection. |
async def send_initial_metadata(self, *, metadata=None):
    """Coroutine to send headers with initial metadata to the client.

    In gRPC you can send initial metadata as soon as possible, because
    gRPC doesn't use `:status` pseudo header to indicate success or failure
    of the current request. gRPC uses trailers for this purpose, and
    trailers are sent during :py:meth:`send_trailing_metadata` call, which
    should be called in the end.

    .. note:: This coroutine will be called implicitly during first
        :py:meth:`send_message` coroutine call, if not called before
        explicitly.

    :param metadata: custom initial metadata, dict or list of pairs
    :raises ProtocolError: if initial metadata was already sent.
    """
    if self._send_initial_metadata_done:
        raise ProtocolError('Initial metadata was already sent')
    # Always reply 200 here; success/failure is conveyed via trailers.
    headers = [
        (':status', '200'),
        ('content-type', self._content_type),
    ]
    metadata = MultiDict(metadata or ())
    # Let the dispatch hook observe/modify metadata before encoding.
    metadata, = await self._dispatch.send_initial_metadata(metadata)
    headers.extend(encode_metadata(metadata))
    await self._stream.send_headers(headers)
    # Mark done only after the headers were actually sent.
    self._send_initial_metadata_done = True
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
:param metadata: custom initial metadata, dict or list of pairs |
def get_route_lines_route(self, **kwargs):
    """Obtain itinerary for one or more lines in the given date.

    Args:
        day (int): Day of the month in format DD.
            The number is automatically padded if it only has one digit.
        month (int): Month number in format MM.
            The number is automatically padded if it only has one digit.
        year (int): Year number in format YYYY.
        lines (list[int] | int): Lines to query, may be empty to get
            all the lines.

    Returns:
        Status boolean and parsed response (list[RouteLinesItem]), or message
        string in case of error.
    """
    # Endpoint parameters.
    # BUG FIX: defaults must be ints -- the original used the strings
    # '01'/'1970', which raise TypeError under the %02d/%d conversions
    # whenever day/month/year are omitted.
    select_date = '%02d/%02d/%d' % (
        kwargs.get('day', 1),
        kwargs.get('month', 1),
        kwargs.get('year', 1970)
    )
    params = {
        'SelectDate': select_date,
        'Lines': util.ints_to_string(kwargs.get('lines', []))
    }
    # Request
    result = self.make_request('geo', 'get_route_lines_route', **params)
    if not util.check_result(result):
        return False, result.get('resultDescription', 'UNKNOWN ERROR')
    # Parse
    values = util.response_list(result, 'resultValues')
    return True, [emtype.RouteLinesItem(**a) for a in values]
Args:
day (int): Day of the month in format DD.
The number is automatically padded if it only has one digit.
month (int): Month number in format MM.
The number is automatically padded if it only has one digit.
year (int): Year number in format YYYY.
lines (list[int] | int): Lines to query, may be empty to get
all the lines.
Returns:
Status boolean and parsed response (list[RouteLinesItem]), or message
string in case of error. |
def _is_viable_phone_number(number):
    """Checks to see if a string could possibly be a phone number.

    At the moment, checks to see that the string begins with at least 2
    digits, ignoring any punctuation commonly found in phone numbers. This
    method does not require the number to be normalized in advance - but does
    assume that leading non-number symbols have been removed, such as by the
    method _extract_possible_number.

    Arguments:
    number -- string to be checked for viability as a phone number

    Returns True if the number could be a phone number of some sort, otherwise
    False
    """
    # Too short to hold a national significant number at all.
    if len(number) < _MIN_LENGTH_FOR_NSN:
        return False
    return fullmatch(_VALID_PHONE_NUMBER_PATTERN, number) is not None
At the moment, checks to see that the string begins with at least 2
digits, ignoring any punctuation commonly found in phone numbers. This
method does not require the number to be normalized in advance - but does
assume that leading non-number symbols have been removed, such as by the
method _extract_possible_number.
Arguments:
number -- string to be checked for viability as a phone number
Returns True if the number could be a phone number of some sort, otherwise
False |
def create(self, friendly_name, event_callback_url=values.unset,
           events_filter=values.unset, multi_task_enabled=values.unset,
           template=values.unset, prioritize_queue_order=values.unset):
    """
    Create a new WorkspaceInstance

    :param unicode friendly_name: Human readable description of this workspace
    :param unicode event_callback_url: If provided, the Workspace will publish events to this URL.
    :param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
    :param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously.
    :param unicode template: One of the available template names.
    :param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.

    :returns: Newly created WorkspaceInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
    """
    # values.of() drops unset entries so only supplied fields are posted.
    form_data = values.of({
        'FriendlyName': friendly_name,
        'EventCallbackUrl': event_callback_url,
        'EventsFilter': events_filter,
        'MultiTaskEnabled': multi_task_enabled,
        'Template': template,
        'PrioritizeQueueOrder': prioritize_queue_order,
    })
    response_payload = self._version.create(
        'POST',
        self._uri,
        data=form_data,
    )
    return WorkspaceInstance(self._version, response_payload, )
:param unicode friendly_name: Human readable description of this workspace
:param unicode event_callback_url: If provided, the Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously.
:param unicode template: One of the available template names.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Newly created WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance |
def show_tooltip(self, pos, tooltip, _sender_deco=None):
    """
    Show a tool tip at the specified position

    :param pos: Tooltip position
    :param tooltip: Tooltip text
    :param _sender_deco: TextDecoration which is the sender of the show
        tooltip request. (for internal use only).
    """
    # Ignore requests from decorations no longer attached to this widget.
    stale_sender = (_sender_deco is not None
                    and _sender_deco not in self.decorations)
    if stale_sender:
        return
    # Truncate to the first 1024 characters before handing off to Qt.
    QtWidgets.QToolTip.showText(pos, tooltip[0:1024], self)
:param pos: Tooltip position
:param tooltip: Tooltip text
:param _sender_deco: TextDecoration which is the sender of the show
tooltip request. (for internal use only). |
def set_cores_massive(self, filename='core_masses_massive.txt'):
    '''
    Uses function cores in nugridse.py to tabulate core masses for each
    run and append the table (LaTeX-style, '&'-separated) to *filename*.

    :param filename: output text file the core-mass rows are appended to
    '''
    # BUG FIX: the original used Python-2 print statements, which are
    # syntax errors under Python 3 (the rest of this codebase uses the
    # print() function); also the output file is now closed via 'with'.
    core_info = []
    minis = []
    for i in range(len(self.runs_H5_surf)):
        sefiles = se(self.runs_H5_out[i])
        mini = sefiles.get('mini')
        minis.append(mini)
        incycle = int(sefiles.se.cycles[-1])
        core_info.append(sefiles.cores(incycle=incycle))
    print_info = ''
    for i in range(len(self.runs_H5_surf)):
        if i == 0:
            # Header: the labels cores() returns, shown once.
            print('Following returned for each initial mass')
            print(core_info[i][1])
        # print '----Mini: ', minis[i], '------'
        print_info += (str(minis[i]) + ' & ')
        info = core_info[i][0]
        for k in range(len(info)):
            print_info += ('{:.3E}'.format(float(core_info[i][0][k])) + ' & ')
        print_info = (print_info + '\n')
        # print core_info[i][2]
    with open(filename, 'a') as f1:
        f1.write(print_info)
def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None):
    """Read MetaMorph STK UIC1Tag from file and return as dict.

    Return empty dictionary if planecount is unknown.
    """
    assert dtype in ('2I', '1I') and byteorder == '<'
    if dtype == '2I':
        # pre MetaMorph 2.5 (not tested)
        pairs = fh.read_array('<u4', 2*count).reshape(count, 2)
        return {'ZDistance': pairs[:, 0] / pairs[:, 1]}
    result = {}
    if planecount:
        for _ in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # silently skip unexpected tags
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, planecount, offset=True)
            result[name] = value
    return result
Return empty dictionary if planecount is unknown. |
def create_space(deployment_name,
                 space_name,
                 security_policy='public',
                 events_retention_days=0,
                 metrics_retention_days=0,
                 token_manager=None,
                 app_url=defaults.APP_URL):
    """
    create a space within the deployment specified and with the various
    retention values set
    """
    deployment_id = get_deployment_id(deployment_name,
                                      token_manager=token_manager,
                                      app_url=app_url)
    body = json.dumps({
        'name': space_name,
        'security_policy': security_policy,
        'events_retention_days': events_retention_days,
        'metrics_retention_days': metrics_retention_days,
    })
    headers = token_manager.get_access_token_headers()
    deployment_url = environment.get_deployment_url(app_url=app_url)
    spaces_url = '%s/api/v1/deployments/%s/spaces' % (deployment_url,
                                                      deployment_id)
    response = requests.post(spaces_url, data=body, headers=headers)
    if response.status_code != 201:
        raise JutException('Error %s: %s' % (response.status_code, response.text))
    return response.json()
rentention values set |
def _clean_html(html):
    """\
    Removes links (``<a href="...">...</a>``) from the provided HTML input.

    Further, it replaces ``<br/>`` line breaks with ``\\n`` and removes
    the pilcrow ("¶") permalink markers from the text.
    """
    # BUG FIX: the original had a string literal split across two lines
    # (a syntax error, presumably from text mangling). NOTE(review): the
    # replaced token is assumed to be the HTML line-break tag -- TODO
    # confirm against upstream history.
    content = html.replace(u'<br/>', u'\n').replace(u'¶', '')
    content = _LINK_PATTERN.sub(u'', content)
    content = _HTML_TAG_PATTERN.sub(u'', content)
    content = _BACKSLASH_PATTERN.sub(u'\n', content)
    return content
return content | \
Removes links (``<a href="...">...</a>``) from the provided HTML input.
Further, it replaces "
" with ``\n`` and removes "¶" from the texts. |
def add(name, mac, mtu=1500):
    '''
    Add a new nictag

    name : string
        name of new nictag
    mac : string
        mac of parent interface or 'etherstub' to create a ether stub
    mtu : int
        MTU (ignored for etherstubs)

    CLI Example:

    .. code-block:: bash

        salt '*' nictagadm.add storage0 etherstub
        salt '*' nictagadm.add trunk0 'DE:AD:OO:OO:BE:EF' 9000
    '''
    if mtu > 9000 or mtu < 1500:
        return {'Error': 'mtu must be a value between 1500 and 9000.'}
    if mac != 'etherstub':
        cmd = 'dladm show-phys -m -p -o address'
        res = __salt__['cmd.run_all'](cmd)
        # dladm prints '00' as '0', so account for that.
        if mac.replace('00', '0') not in res['stdout'].splitlines():
            return {'Error': '{0} is not present on this system.'.format(mac)}
    if mac == 'etherstub':
        cmd = 'nictagadm add -l {0}'.format(name)
    else:
        cmd = 'nictagadm add -p mtu={0},mac={1} {2}'.format(mtu, mac, name)
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] == 0:
        return True
    # BUG FIX: the original condition "'stderr' not in res and
    # res['stderr'] == ''" raised KeyError whenever stderr was absent;
    # fall back to the generic message when no stderr text is available.
    return {'Error': res.get('stderr') or 'failed to create nictag.'}
name : string
name of new nictag
mac : string
mac of parent interface or 'etherstub' to create a ether stub
mtu : int
MTU (ignored for etherstubs)
CLI Example:
.. code-block:: bash
salt '*' nictagadm.add storage0 etherstub
salt '*' nictagadm.add trunk0 'DE:AD:OO:OO:BE:EF' 9000 |
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
    """
    Create a Parselet instance from a file containing
    the Parsley script as a YAML object

    >>> import parslepy
    >>> with open('parselet.yml') as fp:
    ...     parslepy.Parselet.from_yamlfile(fp)
    ...
    <parslepy.base.Parselet object at 0x2014e50>

    :param file fp: an open file-like pointer containing the Parsley script
    :rtype: :class:`.Parselet`

    Other arguments: same as for :class:`.Parselet` contructor
    """
    script = fp.read()
    return cls.from_yamlstring(
        script,
        selector_handler=selector_handler,
        strict=strict,
        debug=debug,
    )
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` contructor |
def _managePsets(configobj, section_name, task_name, iparsobj=None, input_dict=None):
    """ Read in parameter values from PSET-like configobj tasks defined for
    source-finding algorithms, and any other PSET-like tasks under this task,
    and merge those values into the input configobj dictionary.

    Parameters
    ----------
    configobj : configobj.ConfigObj
        Target configuration; ``configobj[section_name]`` is (re)created and
        receives the merged parameter set.
    section_name : str
        Name of the section in ``configobj`` to populate.
    task_name : str
        TEAL task name whose default parameter set forms the base values.
    iparsobj : dict, optional
        Explicit overrides; applied last, so they win over both the TEAL
        defaults and any ``input_dict`` values.
    input_dict : dict, optional
        Pool of optional user parameters.  Keys belonging to this PSET are
        consumed (removed from ``input_dict``) as they are merged in.

    Raises
    ------
    DuplicateKeyError
        If a PSET key appears in both ``input_dict`` and ``iparsobj``.
    """
    # Merge all configobj instances into a single object
    # NOTE(review): assigning a plain dict into a ConfigObj converts it to a
    # Section (which provides .merge used below) -- confirm configobj is a
    # true ConfigObj, not a plain dict, at every call site.
    configobj[section_name] = {}
    # Load the default full set of configuration parameters for the PSET:
    iparsobj_cfg = teal.load(task_name)
    # Identify optional parameters in input_dicts that are from this
    # PSET and add it to iparsobj:
    if input_dict is not None:
        # iterate over a snapshot of the keys since we delete while looping
        for key in list(input_dict.keys()):
            if key in iparsobj_cfg:
                if iparsobj is not None and key in iparsobj:
                    raise DuplicateKeyError("Duplicate parameter '{:s}' "
                                            "provided for task {:s}".format(key, task_name))
                iparsobj_cfg[key] = input_dict[key]
                # consume the key so callers can detect leftover/unknown
                # entries in input_dict afterwards
                del input_dict[key]
    if iparsobj is not None:
        iparsobj_cfg.update(iparsobj)
    # '_task_name_' is TEAL bookkeeping, not a real task parameter
    del iparsobj_cfg['_task_name_']
    # merge these parameters into full set
    configobj[section_name].merge(iparsobj_cfg)
source-finding algorithms, and any other PSET-like tasks under this task,
and merge those values into the input configobj dictionary. |
def parse(self, stream):
    """Parse the entire stream, feeding it line by line to ``parseline``.

    Runs of CR/LF characters are collapsed into single newlines before
    splitting, so ``\\r\\n`` pairs do not produce doubled blank lines.
    """
    normalized = re.sub("[\r\n]+", "\n", stream.read())
    for line in normalized.split("\n"):
        self.parseline(line)
def packageRootPath(path):
    """
    Return the root directory of the Python package containing ``path``.

    Walks up from ``path`` (its directory, if ``path`` is a file) as long as
    each level contains an ``__init__.py``, then returns the directory just
    above the topmost package level.  If ``path`` is not inside a package at
    all, it is returned unchanged.

    :param      path | <str>

    :return     <str>
    """
    path = nstr(path)
    if os.path.isfile(path):
        path = os.path.dirname(path)

    parts = os.path.normpath(path).split(os.path.sep)

    # count how many trailing path components are package directories
    pkg_depth = 0
    for depth in range(len(parts), 0, -1):
        marker = os.path.sep.join(parts[:depth] + ['__init__.py'])
        if not os.path.isfile(marker):
            break
        pkg_depth += 1

    if not pkg_depth:
        return path

    return os.path.abspath(os.path.sep.join(parts[:-pkg_depth]))
path.
:param path | <str>
:return <str> |
def transform(transform_func):
    """Decorator factory: post-process a function's return value.

    ``@transform(g)`` applied to ``f`` yields a wrapper that returns
    ``g(f(...))`` while preserving ``f``'s metadata via ``functools.wraps``.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return transform_func(result)
        return wrapper
    return decorator
def parse_csv_headers(dataset_id):
    """Return the first row of a CSV as a list of headers.

    Looks up the Dataset by primary key and reads only the header row of
    its backing file.
    """
    dataset = Dataset.objects.get(pk=dataset_id)
    with open(dataset.dataset_file.path, 'r') as csv_file:
        csv_rows = reader(csv_file, delimiter=',', quotechar='"')
        first_row = next(csv_rows)
    return first_row
def populate_results_dict(self, sample, gene, total_mismatches, genome_pos, amplicon_length, contig, primer_set):
    """
    Record the ePCR outputs for one gene in the sample's results dictionary.

    :param sample: type MetadataObject: Current metadata sample to process
    :param gene: type STR: Gene of interest
    :param total_mismatches: type INT: Number of mismatches between primer pairs and subject sequence
    :param genome_pos: type STR: Positions of 5' and 3' ends of the amplicon
    :param amplicon_length: type INT: Total length of the amplicon
    :param contig: type STR: Contig name
    :param primer_set: type STR: Name of primer set from the ePCR-formatted file used in the analyses
    """
    result_entry = dict(
        total_mismatches=total_mismatches,
        genome_pos=genome_pos,
        amplicon_length=amplicon_length,
        contig=contig,
        primer_set=primer_set,
    )
    sample[self.analysistype].result_dict[gene] = result_entry
:param sample: type MetadataObject: Current metadata sample to process
:param gene: type STR: Gene of interest
:param total_mismatches: type INT: Number of mismatches between primer pairs and subject sequence
:param genome_pos: type STR: Positions of 5' and 3' ends of the amplicon
:param amplicon_length: type INT: Total length of the amplicon
:param contig: type STR: Contig name
:param primer_set: type STR: Name of primer set from the ePCR-formatted file used in the analyses |
def highlights_worker(work_unit):
    '''coordinate worker wrapper around :func:`maybe_create_highlights`

    Loads the stored file payload for ``work_unit.key`` from kvlayer and, if
    present, generates and stores highlights for it.  On failure an ERROR
    payload is written to the ``highlights`` table so clients polling for
    results see the failure instead of hanging.

    :param work_unit: coordinate work unit; ``work_unit.spec['config']``
        must carry the global yakonfig configuration.
    :raises coordinate.exceptions.ProgrammerError: if the work unit spec
        lacks a ``config`` entry.
    '''
    if 'config' not in work_unit.spec:
        raise coordinate.exceptions.ProgrammerError(
            'could not run `create_highlights` without global config')
    web_conf = Config()
    unitconf = work_unit.spec['config']
    with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
                                   config=unitconf):
        file_id = make_file_id(work_unit.key)
        web_conf.kvlclient.setup_namespace(highlights_kvlayer_tables)
        payload_strs = list(web_conf.kvlclient.get('files', file_id))
        if payload_strs and payload_strs[0][1]:
            payload_str = payload_strs[0][1]
            try:
                data = json.loads(payload_str)
                # now create the response payload
                maybe_store_highlights(file_id, data, web_conf.tfidf,
                                       web_conf.kvlclient)
            except Exception as exc:
                logger.critical('failed to decode data out of %r',
                                payload_str, exc_info=True)
                payload = {
                    'state': ERROR,
                    'error': {
                        'code': 7,
                        # BUG FIX: format_exc takes no exception argument
                        # (its parameter is `limit`); passing `exc` was
                        # silently abusing that parameter.
                        'message': 'failed to generate stored results:\n%s' %
                            traceback.format_exc()}
                }
                payload_str = json.dumps(payload)
                # BUG FIX: was `kvlclient.put(...)` -- an undefined name;
                # the client lives on the web_conf object.
                web_conf.kvlclient.put('highlights', (file_id, payload_str))
def get_closest_points(self, mesh):
    """
    For each point in ``mesh`` find the closest surface element, and return
    the corresponding closest point.

    See :meth:`superclass method
    <.base.BaseSurface.get_closest_points>`
    for spec of input and result values.
    """
    # first, for each point in mesh compute minimum distance to each
    # surface. The distance matrix is flattend, because mesh can be of
    # an arbitrary shape. By flattening we obtain a ``distances`` matrix
    # for which the first dimension represents the different surfaces
    # and the second dimension the mesh points.
    dists = numpy.array(
        [surf.get_min_distance(mesh).flatten() for surf in self.surfaces]
    )
    # find for each point in mesh the index of closest surface
    # NOTE: at an exact distance tie this marks more than one surface as
    # closest for the same point; the loop below then lets the
    # last-iterated matching surface win (it overwrites earlier writes).
    idx = dists == numpy.min(dists, axis=0)
    # loop again over surfaces. For each surface compute the closest
    # points, and associate them to the mesh points for which the surface
    # is the closest. Note that if a surface is not the closest to any of
    # the mesh points then the calculation is skipped
    lons = numpy.empty_like(mesh.lons.flatten())
    lats = numpy.empty_like(mesh.lats.flatten())
    # depths are optional on a Mesh; propagate None unchanged
    depths = None if mesh.depths is None else \
        numpy.empty_like(mesh.depths.flatten())
    for i, surf in enumerate(self.surfaces):
        if not idx[i, :].any():
            continue
        cps = surf.get_closest_points(mesh)
        lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
        lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
        if depths is not None:
            depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
    # restore the caller's original mesh shape
    lons = lons.reshape(mesh.lons.shape)
    lats = lats.reshape(mesh.lats.shape)
    if depths is not None:
        depths = depths.reshape(mesh.depths.shape)
    return Mesh(lons, lats, depths)
the corresponding closest point.
See :meth:`superclass method
<.base.BaseSurface.get_closest_points>`
for spec of input and result values. |
def get_assessment_admin_session_for_bank(self, bank_id):
    """Gets the ``OsidSession`` associated with the assessment admin service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the bank
    return: (osid.assessment.AssessmentAdminSession) - ``an
            _assessment_admin_session``
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_assessment_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_admin()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if self.supports_assessment_admin():
        # TODO: verify the catalog Id exists and raise errors.NotFound for
        # unknown ``bank_id`` values (per the NotFound clause above).
        # pylint: disable=no-member
        return sessions.AssessmentAdminSession(bank_id, runtime=self._runtime)
    raise errors.Unimplemented()
arg: bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.AssessmentAdminSession) - ``an
_assessment_admin_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_assessment_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_admin()`` and
``supports_visible_federation()`` are ``true``.* |
def get_next_assessment_part_id(self, assessment_part_id=None):
    """Return the next part Id in the basic simple-sequence case.

    Falls back to this part's own Id when ``assessment_part_id`` is not
    supplied.  Records may override this method for non-sequential cases.
    """
    part_id = self.get_id() if assessment_part_id is None else assessment_part_id
    return get_next_part_id(part_id,
                            runtime=self._runtime,
                            proxy=self._proxy,
                            sequestered=True)[0]
def _lookup_generic_scalar(self,
obj,
as_of_date,
country_code,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
result = self._lookup_generic_scalar_helper(
obj, as_of_date, country_code,
)
if result is not None:
matches.append(result)
else:
missing.append(obj) | Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing. |
def _add_current_usage(self, value, maximum=None, resource_id=None,
                       aws_type=None):
    """
    Add a new current usage value for this limit.

    Wraps the value in a new :py:class:`~.AwsLimitUsage` and appends it to
    the internal list. If more than one usage value is given to this
    service, they should have ``id`` and ``aws_type`` set.

    This method should only be called from the :py:class:`~._AwsService`
    instance that created and manages this Limit.

    :param value: the numeric usage value
    :type value: :py:obj:`int` or :py:obj:`float`
    :param resource_id: If there can be multiple usage values for one limit,
      an AWS ID for the resource this instance describes
    :type resource_id: str
    :param aws_type: if ``id`` is not None, the AWS resource type
      that ID represents. As a convention, we use the AWS Resource
      Type names used by
      `CloudFormation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html>`_ # noqa
    :type aws_type: str
    """
    usage = AwsLimitUsage(
        self,
        value,
        maximum=maximum,
        resource_id=resource_id,
        aws_type=aws_type,
    )
    self._current_usage.append(usage)
Creates a new :py:class:`~.AwsLimitUsage` instance and
appends it to the internal list. If more than one usage value
is given to this service, they should have ``id`` and
``aws_type`` set.
This method should only be called from the :py:class:`~._AwsService`
instance that created and manages this Limit.
:param value: the numeric usage value
:type value: :py:obj:`int` or :py:obj:`float`
:param resource_id: If there can be multiple usage values for one limit,
an AWS ID for the resource this instance describes
:type resource_id: str
:param aws_type: if ``id`` is not None, the AWS resource type
that ID represents. As a convention, we use the AWS Resource
Type names used by
`CloudFormation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html>`_ # noqa
:type aws_type: str |
def get_ngrok_public_url():
    """Get the ngrok public HTTP URL from the local client API.

    Returns the first tunnel whose public URL uses plain HTTP, or ``None``
    when the local ngrok client API is unreachable (implicitly also when
    no HTTP tunnel is configured).
    """
    try:
        response = requests.get(
            url=NGROK_CLIENT_API_BASE_URL + "/tunnels",
            headers={'content-type': 'application/json'})
        response.raise_for_status()
    except requests.exceptions.RequestException:
        print("Could not connect to the ngrok client API; "
              "assuming not running.")
        return None

    for tunnel in response.json()["tunnels"]:
        public_url = tunnel.get("public_url", "")
        if public_url.startswith("http://"):
            print("Found ngrok public HTTP URL:", tunnel["public_url"])
            return tunnel["public_url"]
def predict(self, X):
    """Predict the class labels for the provided data

    Parameters
    ----------
    X : array-like, shape (n_query, n_features).
        Test samples.

    Returns
    -------
    y : array of shape [n_samples]
        Class labels for each data sample.
    """
    # TODO: Make classification of multiple samples a bit more effective...
    if X.ndim > 1 and X.shape[1] != 1:
        out = []
        for x in X:
            # NOTE(review): `out +=` requires predict() to return a list;
            # the 'distance' branch below returns a plain scalar via
            # .tolist() on a 0-d selection -- confirm multi-sample
            # prediction with weights='distance' is actually exercised.
            out += self.predict(x)
        return out
    X = X.flatten()
    if self.metric == 'minkowski':
        # Minkowski distance of order p without the 1/p root; the root is
        # monotonic so neighbor ranking is unaffected.
        dists = np.sum(np.abs(self._data - X) ** self.p, axis=1)
    else:
        # TODO: Implement other metrics.
        raise ValueError("Only Minkowski distance metric implemented...")
    argument = np.argsort(dists)
    labels = self._labels[argument[:self.n_neighbors]]
    if self.weights == 'distance':
        # inverse-distance weighting (yields inf weight when a neighbor
        # coincides exactly with X)
        weights = 1 / dists[argument[:self.n_neighbors]]
        out = np.zeros((len(self._classes), ), 'float')
        for i, c in enumerate(self._classes):
            out[i] = np.sum(weights[labels == c])
        out /= np.sum(out)
        # BUG FIX: np.argmax(out) is an index into self._classes (out holds
        # one score per class); the old code indexed self._labels with it,
        # returning an arbitrary training sample's label instead of the
        # winning class.
        y_pred = self._classes[np.argmax(out)]
    else:
        y_pred, _ = mode(labels)
    return y_pred.tolist()
Parameters
----------
X : array-like, shape (n_query, n_features).
Test samples.
Returns
-------
y : array of shape [n_samples]
Class labels for each data sample. |
def create(self, chat_id=None, name=None, owner=None, user_list=None):
    """
    Create a group chat session.

    Reference:
    https://work.weixin.qq.com/api/doc#90000/90135/90245

    Restrictions: only self-built enterprise apps may call this, and the
    app's visible scope must be the root department; group size may not
    exceed the admin-configured member limit and never more than 500
    members; each enterprise may create at most 1000 groups per day.

    :param chat_id: unique id for the chat, must not collide with an
        existing group; string of at most 32 characters from [0-9a-zA-Z].
        When omitted, the system generates a random chat id.
    :param name: group name; truncated beyond 50 utf8 characters
    :param owner: userid of the group owner; when omitted the system picks
        a random member of ``user_list``
    :param user_list: member userids; between 2 and 500 entries
    :return: parsed JSON response
    """
    payload = optionaldict(
        chatid=chat_id,
        name=name,
        owner=owner,
        userlist=user_list,
    )
    return self._post('appchat/create', data=payload)
详情请参考
https://work.weixin.qq.com/api/doc#90000/90135/90245
限制说明:
只允许企业自建应用调用,且应用的可见范围必须是根部门;
群成员人数不可超过管理端配置的“群成员人数上限”,且最大不可超过500人;
每企业创建群数不可超过1000/天;
:param chat_id: 群聊的唯一标志,不能与已有的群重复;字符串类型,最长32个字符。只允许字符0-9及字母a-zA-Z。如果不填,系统会随机生成群id
:param name: 群聊名,最多50个utf8字符,超过将截断
:param owner: 指定群主的id。如果不指定,系统会随机从userlist中选一人作为群主
:param user_list: 会话成员列表,成员用userid来标识。至少2人,至多500人
:return: 返回的 JSON 数据包 |
def commit_signature(vcs, user_config, signature):
    """Add `signature` to the list of committed signatures

    The signature must already be staged

    Args:
        vcs (easyci.vcs.base.Vcs)
        user_config (dict)
        signature (basestring)

    Raises:
        NotStagedError
        AlreadyCommittedError
    """
    if signature not in get_staged_signatures(vcs):
        raise NotStagedError
    history = get_committed_signatures(vcs)
    if signature in history:
        raise AlreadyCommittedError
    history.append(signature)
    # persist only the most recent `history_limit` entries
    trimmed = history[-user_config['history_limit']:]
    with open(_get_committed_history_path(vcs), 'w') as history_file:
        history_file.write('\n'.join(trimmed))
    unstage_signature(vcs, signature)
The signature must already be staged
Args:
vcs (easyci.vcs.base.Vcs)
user_config (dict)
signature (basestring)
Raises:
NotStagedError
AlreadyCommittedError |
def delete(self):
    """ Delete this source """
    response = self._client.request('DELETE', self.url)
    logger.info("delete(): %s", response.status_code)
def lineup_user(self, userid):
    '''Get user lineup using a ID'''
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
        'Referer': 'http://' + self.domain + '/standings.phtml',
        "User-Agent": user_agent,
    }
    page = self.session.get(
        'http://' + self.domain + '/playerInfo.phtml?pid=' + userid,
        headers=headers).content
    soup = BeautifulSoup(page)
    # each td.name_cont cell holds one lineup entry
    return [cell.text.strip()
            for cell in soup.find_all('td', {'class': 'name_cont'})]
def main():
    """
    NAME
       dia_vgp.py
    DESCRIPTION
      converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm
    SYNTAX
      dia_vgp.py [-h] [-i] [-f FILE] [< filename]
    OPTIONS
        -h prints help message and quits
        -i interactive data entry
        -f FILE to specify file name on the command line
    INPUT
      for file entry:
        D I A95 SLAT SLON
      where:
         D: declination
         I: inclination
         A95: alpha_95
         SLAT: site latitude (positive north)
         SLON: site longitude (positive east)
    OUTPUT
       PLON PLAT DP DM
       where:
          PLAT: pole latitude
          PLON: pole longitude (positive east)
          DP: 95% confidence angle in parallel
          DM: 95% confidence angle in meridian
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-i' in sys.argv: # if one is -i
        while 1:
            try:
                ans=input("Input Declination: <cntrl-D to quit> ")
                Dec=float(ans) # assign input to Dec, after conversion to floating point
                ans=input("Input Inclination: ")
                Inc =float(ans)
                ans=input("Input Alpha 95: ")
                a95 =float(ans)
                ans=input("Input Site Latitude: ")
                slat =float(ans)
                ans=input("Input Site Longitude: ")
                slong =float(ans)
                spitout(Dec,Inc,a95,slat,slong) # call dia_vgp function from pmag module
                # NOTE(review): plong/plat/dp/dm are never assigned here --
                # spitout()'s return value is discarded on the line above, so
                # this print raises NameError, which the bare except below
                # swallows as "Good-bye".  Presumably the call should unpack
                # spitout's result; verify against the pmag module.
                print('%7.1f %7.1f %7.1f %7.1f'%(plong,plat,dp,dm)) # print out returned stuff
            # WARNING: bare except -- intended for EOF / bad input, but it
            # also hides the NameError noted above and any other bug
            except:
                print("\n Good-bye\n")
                sys.exit()
    elif '-f' in sys.argv: # manual input of file name
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        f=open(file,'r')
        inlist = []
        for line in f.readlines():
            inlist.append([])
            # loop over the elements, split by whitespace
            for el in line.split():
                inlist[-1].append(float(el))
        spitout(inlist)
    else:
        # shadows the builtin input(); harmless here since the interactive
        # branch never reaches this point
        input = sys.stdin.readlines() # read from standard input
        inlist = []
        for line in input: # read in the data (as string variable), line by line
            inlist.append([])
            # loop over the elements, split by whitespace
            for el in line.split():
                inlist[-1].append(float(el))
        spitout(inlist)
dia_vgp.py
DESCRIPTION
converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm
SYNTAX
dia_vgp.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
D I A95 SLAT SLON
where:
D: declination
I: inclination
A95: alpha_95
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT DP DM
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
DP: 95% confidence angle in parallel
DM: 95% confidence angle in meridian |
def title_from_content(content):
    """
    Try to extract the first sentence from a block of text to use as a title.

    Cuts at the first sentence-ending delimiter found (keeping the
    delimiter itself) and strips any HTML tags from the result.
    """
    sentence_delimiters = (". ", "?", "!", "<br />", "\n", "</p>")
    for delimiter in sentence_delimiters:
        if delimiter in content:
            content = content.split(delimiter)[0] + delimiter
            break
    return strip_tags(content)
def from_config(cls, cp, section, variable_args):
    """Returns a distribution based on a configuration file.

    The tag part of the section header must list the polar and azimuthal
    angle names, e.g. ``[prior-theta+phi]``.  Recognized options:

    * ``polar-angle`` / ``azimuthal-angle``: override the default names of
      the two angles (``_default_polar_angle`` /
      ``_default_azimuthal_angle``); the names must appear among the
      variable args.
    * ``min-<angle>`` / ``max-<angle>``: per-angle bounds, as factors of
      pi, read via ``bounded.get_param_bounds_from_config``.
    * ``azimuthal_cyclic_domain =``: make the azimuthal domain (default
      ``[0, 2pi)``) cyclic.

    Parameters
    ----------
    cp : ConfigParser instance
        The config file.
    section : str
        The name of the section.
    variable_args : str
        The names of the parameters for this distribution, separated by
        ``VARARGS_DELIM``. These must appear in the "tag" part of the
        section header.

    Returns
    -------
    UniformSolidAngle
        A distribution instance from the pycbc.inference.prior module.

    Raises
    ------
    Error
        If either configured angle name is not one of ``variable_args``.
    """
    tag = variable_args
    variable_args = variable_args.split(VARARGS_DELIM)

    # resolve each angle name from the config, falling back to the
    # class defaults when the option is absent
    def _angle_name(option, default):
        try:
            return cp.get_opt_tag(section, option, tag)
        except Error:
            return default

    polar_angle = _angle_name('polar-angle', cls._default_polar_angle)
    azimuthal_angle = _angle_name('azimuthal-angle',
                                  cls._default_azimuthal_angle)

    if polar_angle not in variable_args:
        raise Error("polar-angle %s is not one of the variable args (%s)"%(
            polar_angle, ', '.join(variable_args)))
    if azimuthal_angle not in variable_args:
        raise Error("azimuthal-angle %s is not one of the variable args "%(
            azimuthal_angle) + "(%s)"%(', '.join(variable_args)))

    # per-angle bounds, if provided
    polar_bounds = bounded.get_param_bounds_from_config(
        cp, section, tag, polar_angle)
    azimuthal_bounds = bounded.get_param_bounds_from_config(
        cp, section, tag, azimuthal_angle)

    # see if a cyclic domain is desired for the azimuthal angle
    azimuthal_cyclic_domain = cp.has_option_tag(
        section, 'azimuthal_cyclic_domain', tag)

    return cls(polar_angle=polar_angle,
               azimuthal_angle=azimuthal_angle,
               polar_bounds=polar_bounds,
               azimuthal_bounds=azimuthal_bounds,
               azimuthal_cyclic_domain=azimuthal_cyclic_domain)
The section must have the names of the polar and azimuthal angles in
the tag part of the section header. For example:
.. code-block:: ini
[prior-theta+phi]
name = uniform_solidangle
If nothing else is provided, the default names and bounds of the polar
and azimuthal angles will be used. To specify a different name for
each angle, set the `polar-angle` and `azimuthal-angle` attributes. For
example:
.. code-block:: ini
[prior-foo+bar]
name = uniform_solidangle
polar-angle = foo
azimuthal-angle = bar
Note that the names of the variable args in the tag part of the section
name must match the names of the polar and azimuthal angles.
Bounds may also be specified for each angle, as factors of pi. For
example:
.. code-block:: ini
[prior-theta+phi]
polar-angle = theta
azimuthal-angle = phi
min-theta = 0
max-theta = 0.5
This will return a distribution that is uniform in the upper
hemisphere.
By default, the domain of the azimuthal angle is `[0, 2pi)`. To make
this domain cyclic, add `azimuthal_cyclic_domain =`.
Parameters
----------
cp : ConfigParser instance
The config file.
section : str
The name of the section.
variable_args : str
The names of the parameters for this distribution, separated by
``VARARGS_DELIM``. These must appear in the "tag" part
of the section header.
Returns
-------
UniformSolidAngle
A distribution instance from the pycbc.inference.prior module. |
def _validate(self):
""" Enforce some structure to the config file """
# This could be done with a default config
# Check that specific keys exist
sections = odict([
('catalog',['dirname','basename',
'lon_field','lat_field','objid_field',
'mag_1_band', 'mag_1_field', 'mag_err_1_field',
'mag_2_band', 'mag_2_field', 'mag_err_2_field',
]),
('mask',[]),
('coords',['nside_catalog','nside_mask','nside_likelihood',
'nside_pixel','roi_radius','roi_radius_annulus',
'roi_radius_interior','coordsys',
]),
('likelihood',[]),
('output',[]),
('batch',[]),
])
keys = np.array(list(sections.keys()))
found = np.in1d(keys,list(self.keys()))
if not np.all(found):
msg = 'Missing sections: '+str(keys[~found])
raise Exception(msg)
for section,keys in sections.items():
keys = np.array(keys)
found = np.in1d(keys,list(self[section].keys()))
if not np.all(found):
msg = 'Missing keys in %s: '%(section)+str(keys[~found])
raise Exception(msg) | Enforce some structure to the config file |
def LoadSecondaryConfig(self, filename=None, parser=None):
    """Loads an additional configuration file.

    The configuration system has the concept of a single Primary configuration
    file, and multiple secondary files. The primary configuration file is the
    main file that is used by the program. Any writebacks will only be made to
    the primary configuration file. Secondary files contain additional
    configuration data which will be merged into the configuration system.

    This method adds an additional configuration file.

    Args:
      filename: The configuration file that will be loaded. For example
          file:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR.
      parser: An optional parser can be given. In this case, the parser's data
          will be loaded directly.

    Returns:
      The parser used to parse this configuration source.

    Raises:
      ValueError: if both filename and parser arguments are None.
      ConfigFileNotFound: If a specified included file was not found.
    """
    if filename:
        # Maintain a stack of config file locations in loaded order.
        self.files.append(filename)
        parser_cls = self.GetParserFromFilename(filename)
        parser = parser_cls(filename=filename)
        logging.debug("Loading configuration from %s", filename)
        self.secondary_config_parsers.append(parser)
    elif parser is None:
        raise ValueError("Must provide either a filename or a parser.")
    # Resolve "Config.includes" against a throwaway clone first, so this
    # config only absorbs each include's data once, after it fully parsed.
    clone = self.MakeNewConfig()
    clone.MergeData(parser.RawData())
    clone.initialized = True
    for file_to_load in clone["Config.includes"]:
        # We can not include a relative file from a config which does not have
        # path.
        if not os.path.isabs(file_to_load):
            if not filename:
                # NOTE(review): this branch is only reached when `filename`
                # is falsy, so the first %s renders as None/'' -- the
                # message's "While loading %s" is presumably meant to name
                # the parser source; confirm intent.
                raise ConfigFileNotFound(
                    "While loading %s: Unable to include a relative path (%s) "
                    "from a config without a filename" % (filename, file_to_load))
            # If the included path is relative, we take it as relative to the
            # current path of the config.
            file_to_load = os.path.join(os.path.dirname(filename), file_to_load)
        # Recursive call: includes may themselves declare further includes.
        clone_parser = clone.LoadSecondaryConfig(file_to_load)
        # If an include file is specified but it was not found, raise an error.
        if not clone_parser.parsed:
            raise ConfigFileNotFound(
                "Unable to load include file %s" % file_to_load)
    self.MergeData(clone.raw_data)
    self.files.extend(clone.files)
    return parser
The configuration system has the concept of a single Primary configuration
file, and multiple secondary files. The primary configuration file is the
main file that is used by the program. Any writebacks will only be made to
the primary configuration file. Secondary files contain additional
configuration data which will be merged into the configuration system.
This method adds an additional configuration file.
Args:
filename: The configuration file that will be loaded. For example
file:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR.
parser: An optional parser can be given. In this case, the parser's data
will be loaded directly.
Returns:
The parser used to parse this configuration source.
Raises:
ValueError: if both filename and parser arguments are None.
ConfigFileNotFound: If a specified included file was not found. |
def limit(self, limit):
    """Sets `sysparm_limit`

    :param limit: Size limit (int); booleans are rejected even though
        ``bool`` is a subclass of ``int``
    :raise InvalidUsage: if `limit` is not a plain integer
    """
    # bool subclasses int, so it must be excluded explicitly
    is_plain_int = isinstance(limit, int) and not isinstance(limit, bool)
    if not is_plain_int:
        raise InvalidUsage("limit size must be of type integer")
    self._sysparms['sysparm_limit'] = limit
:param limit: Size limit (int) |
def set_default(sld, tld):
    '''
    Sets domain to use namecheap default DNS servers. Required for free
    services like Host record management, URL forwarding, email forwarding,
    dynamic DNS and other value added services.

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    Returns ``True`` if the domain was successfully pointed at the default DNS
    servers.

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains_dns.set_default sld tld
    '''
    request_opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setDefault')
    request_opts['SLD'] = sld
    request_opts['TLD'] = tld

    response_xml = salt.utils.namecheap.post_request(request_opts)
    if response_xml is None:
        # request failed entirely
        return False

    result_node = response_xml.getElementsByTagName('DomainDNSSetDefaultResult')[0]
    return salt.utils.namecheap.string_to_value(result_node.getAttribute('Updated'))
services like Host record management, URL forwarding, email forwarding,
dynamic DNS and other value added services.
sld
SLD of the domain name
tld
TLD of the domain name
Returns ``True`` if the domain was successfully pointed at the default DNS
servers.
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains_dns.set_default sld tld |
def execute(self):
    """
    Execute the job, that is, execute all of its tasks.

    Each produced sync map will be stored
    inside the corresponding task object.

    :raises: :class:`~aeneas.executejob.ExecuteJobExecutionError`: if there is a problem during the job execution
    """
    self.log(u"Executing job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True, ExecuteJobExecutionError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True, ExecuteJobExecutionError)
    job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
    if (job_max_tasks > 0) and (len(self.job) > job_max_tasks):
        self.log_exc(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(self.job), job_max_tasks), None, True, ExecuteJobExecutionError)
    self.log([u"Number of tasks: '%d'", len(self.job)])
    for task in self.job.tasks:
        # BUG FIX: custom_id was assigned inside the try block, so a failure
        # while reading task.configuration raised NameError in the except
        # handler (or reused the previous task's id); default it first.
        custom_id = None
        try:
            custom_id = task.configuration["custom_id"]
            self.log([u"Executing task '%s'...", custom_id])
            executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger)
            executor.execute()
            self.log([u"Executing task '%s'... done", custom_id])
        except Exception as exc:
            self.log_exc(u"Error while executing task '%s'" % (custom_id), exc, True, ExecuteJobExecutionError)
        self.log(u"Executing task: succeeded")
    self.log(u"Executing job: succeeded")
Each produced sync map will be stored
inside the corresponding task object.
:raises: :class:`~aeneas.executejob.ExecuteJobExecutionError`: if there is a problem during the job execution |
def circuit_to_pyquil(circuit: Circuit) -> pyquil.Program:
    """Convert a QuantumFlow circuit to a pyQuil program.

    Supports gates whose names appear in ``QUIL_GATES`` plus measurement
    operations; any other element raises ``ValueError``.
    """
    prog = pyquil.Program()
    for elem in circuit.elements:
        if isinstance(elem, Gate) and elem.name in QUIL_GATES:
            params = list(elem.params.values()) if elem.params else []
            prog.gate(elem.name, params, elem.qubits)
        elif isinstance(elem, Measure):
            prog.measure(elem.qubit, elem.cbit)
        else:
            # Resolves the old FIXME: name the offending operation so
            # callers can locate it in their circuit.
            raise ValueError(
                'Cannot convert operation to pyquil: {}'.format(elem))
    return prog
def get_expiration_date(self, fn):
    """
    Reads the expiration date of a local crt file.

    Runs ``openssl x509 -noout -dates`` on the certificate and parses the
    ``notAfter=`` line into a datetime.

    :param fn: path to the local certificate file
    :return: parsed ``notAfter`` datetime, or ``None`` (implicitly) when
        no ``notAfter=`` line appears in the openssl output
    """
    r = self.local_renderer
    # NOTE(review): the command template references {ssl_crt_fn} while the
    # value is stored under r.env.crt_fn -- presumably the renderer prefixes
    # env names with the satchel name ('ssl'); confirm against the renderer.
    r.env.crt_fn = fn
    with hide('running'):
        ret = r.local('openssl x509 -noout -in {ssl_crt_fn} -dates', capture=True)
    matches = re.findall('notAfter=(.*?)$', ret, flags=re.IGNORECASE)
    if matches:
        return dateutil.parser.parse(matches[0])
def to_unicode(sorb, allow_eval=False):
    r"""Coerce *sorb* to a unicode (UTF-8 decoded) string.

    Bytes are decoded, and textual representations of bytes/unicode
    literals -- the kind accidentally produced by ``str(b'...')`` -- are
    evaluated back into real strings, recursively.

    >>> to_unicode(b'whatever')
    'whatever'
    >>> to_unicode(b'b"whatever"')
    'whatever'
    >>> to_unicode(u'b"whatever"')
    'whatever'
    >>> to_unicode(str(b'whatever'))
    'whatever'
    """
    if sorb is None:
        return sorb
    if isinstance(sorb, bytes):
        sorb = sorb.decode()
    for prefix in ("b'", 'b"', "u'", 'u"'):
        quote = prefix[-1]
        if sorb.startswith(prefix) and sorb.endswith(quote):
            # Evaluate the literal in an empty sandbox, then recurse in
            # case the result is itself bytes or another wrapped literal.
            return to_unicode(eval(sorb, {'__builtins__': None}, {}))
    return sorb
Evaluate bytes literals that are sometimes accidentally created by str(b'whatever')
>>> to_unicode(b'whatever')
'whatever'
>>> to_unicode(b'b"whatever"')
'whatever'
>>> to_unicode(repr(b'b"whatever"'))
'whatever'
>>> to_unicode(str(b'b"whatever"'))
'whatever'
>>> to_unicode(str(str(b'whatever')))
'whatever'
>>> to_unicode(bytes(u'whatever', 'utf-8'))
'whatever'
>>> to_unicode(b'u"whatever"')
'whatever'
>>> to_unicode(u'b"whatever"')
'whatever'
There seems to be a bug in python3 core:
>>> str(b'whatever') # user intended str.decode(b'whatever') (str coercion) rather than python code repr
"b'whatever'"
>>> repr(str(b'whatever'))
'"b\'whatever\'"'
>>> str(repr(str(b'whatever')))
'"b\'whatever\'"'
>>> repr(str(repr(str(b'whatever'))))
'\'"b\\\'whatever\\\'"\''
>>> repr(repr(b'whatever'))
'"b\'whatever\'"'
>>> str(str(b'whatever'))
"b'whatever'"
>>> str(repr(b'whatever'))
"b'whatever'" |
def edit_txt(filename, substitutions, newname=None):
    """Primitive text file stream editor.

    This function can be used to edit free-form text files such as the
    topology file. By default it does an **in-place edit** of
    *filename*. If *newname* is supplied then the edited
    file is written to *newname*.

    :Arguments:
       *filename*
           input text file
       *substitutions*
           substitution commands (see below for format)
       *newname*
           output filename; if ``None`` then *filename* is changed in
           place [``None``]

    *substitutions* is a list of triplets; the first two elements are regular
    expression strings, the last is the substitution value. It mimics
    ``sed`` search and replace. The rules for *substitutions*:

    .. productionlist::
       substitutions: "[" search_replace_tuple, ... "]"
       search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")"
       line_match_RE: regular expression that selects the line (uses match)
       search_RE: regular expression that is searched in the line
       replacement: replacement string for search_RE

    Running :func:`edit_txt` does pretty much what a simple ::

         sed /line_match_RE/s/search_RE/replacement/

    with repeated substitution commands does.

    Special replacement values:
    - ``None``: the rule is ignored
    - ``False``: the line is deleted (even if other rules match)

    .. note::

       * No sanity checks are performed and the substitutions must be supplied
         exactly as shown.
       * All substitutions are applied to a line; thus the order of the substitution
         commands may matter when one substitution generates a match for a subsequent rule.
       * If replacement is set to ``None`` then the whole expression is ignored and
         whatever is in the template is used. To unset values you must provide an
         empty string or similar.
       * Delete a matching line if replacement=``False``.
    """
    if newname is None:
        newname = filename

    # No sanity checks (figure out later how to give decent diagnostics).
    # Filter out any rules that have None in replacement.
    _substitutions = [{'lRE': re.compile(str(lRE)),
                       'sRE': re.compile(str(sRE)),
                       'repl': repl}
                      for lRE,sRE,repl in substitutions if repl is not None]

    with tempfile.TemporaryFile() as target:
        with open(filename, 'rb') as src:
            logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions)))
            for line in src:
                # Decode so the compiled regexes match str, not bytes.
                line = line.decode("utf-8")
                keep_line = True
                for subst in _substitutions:
                    m = subst['lRE'].match(line)
                    if m:              # apply substitution to this line?
                        logger.debug('match:    '+line.rstrip())
                        if subst['repl'] is False:   # special rule: delete line
                            keep_line = False
                        else:                        # standard replacement
                            line = subst['sRE'].sub(str(subst['repl']), line)
                            logger.debug('replaced: '+line.rstrip())
                if keep_line:
                    target.write(line.encode('utf-8'))
                else:
                    logger.debug("Deleting line %r", line)

        # Rewind the temp file and copy the edited content over the
        # destination (which may be the original file for in-place edit).
        target.seek(0)
        with open(newname, 'wb') as final:
            shutil.copyfileobj(target, final)
    logger.info("edited txt = {newname!r}".format(**vars())) | Primitive text file stream editor.
This function can be used to edit free-form text files such as the
topology file. By default it does an **in-place edit** of
*filename*. If *newname* is supplied then the edited
file is written to *newname*.
:Arguments:
*filename*
input text file
*substitutions*
substitution commands (see below for format)
*newname*
output filename; if ``None`` then *filename* is changed in
place [``None``]
*substitutions* is a list of triplets; the first two elements are regular
expression strings, the last is the substitution value. It mimics
``sed`` search and replace. The rules for *substitutions*:
.. productionlist::
substitutions: "[" search_replace_tuple, ... "]"
search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")"
line_match_RE: regular expression that selects the line (uses match)
search_RE: regular expression that is searched in the line
replacement: replacement string for search_RE
Running :func:`edit_txt` does pretty much what a simple ::
sed /line_match_RE/s/search_RE/replacement/
with repeated substitution commands does.
Special replacement values:
- ``None``: the rule is ignored
- ``False``: the line is deleted (even if other rules match)
.. note::
* No sanity checks are performed and the substitutions must be supplied
exactly as shown.
* All substitutions are applied to a line; thus the order of the substitution
commands may matter when one substitution generates a match for a subsequent rule.
* If replacement is set to ``None`` then the whole expression is ignored and
whatever is in the template is used. To unset values you must provide an
empty string or similar.
* Delete a matching line if replacement=``False``. |
def league_info():
    """Return the league's attributes as a dictionary."""
    league = __get_league_object()
    # Copy every XML attribute into a plain dict.
    return {key: league.attrib[key] for key in league.attrib}
def _bn(editor, force=False):
"""
Go to next buffer.
"""
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_next_buffer() | Go to next buffer. |
def term(self):
    """
    term: atom (('*' | '/' | '//') atom)*
    """
    node = self.atom()
    multiplicative = (Nature.MUL, Nature.DIV, Nature.INT_DIV)
    while self.token.nature in multiplicative:
        operator = self.token
        # The loop condition guarantees the nature is one of the three
        # multiplicative operators, so consume it directly.
        self._process(operator.nature)
        node = BinaryOperation(left=node, op=operator, right=self.atom())
    return node
def get_project_name(project_id, projects):
    """Retrieves project name for given project id

    Args:
        projects: List of projects
        project_id: project id

    Returns: Project name or None if there is no match
    """
    names = (proj.name for proj in projects if proj.id == project_id)
    # First matching name, or None when nothing matches.
    return next(names, None)
Args:
projects: List of projects
project_id: project id
Returns: Project name or None if there is no match |
def get_url(self, name, view_name, kwargs, request):
    """
    Given a name, view name and kwargs, return the URL that hyperlinks to the object.

    May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
    attributes are not configured to correctly match the URL conf.

    :param name: display name used for the returned Hyperlink.
    :param view_name: URL pattern name to reverse; falsy values yield None.
    :param kwargs: keyword arguments passed to the URL reverse lookup.
    :param request: request used to build an absolute URL.
    """
    # Return None if the view name is not supplied
    if not view_name:
        return None

    # Return the hyperlink, or error if incorrectly configured.
    try:
        url = self.reverse(view_name, kwargs=kwargs, request=request)
    except NoReverseMatch:
        # Re-raise as a configuration error with actionable guidance;
        # a NoReverseMatch here almost always means a setup mistake.
        msg = (
            'Could not resolve URL for hyperlinked relationship using '
            'view name "%s". You may have failed to include the related '
            'model in your API, or incorrectly configured the '
            '`lookup_field` attribute on this field.'
        )
        raise ImproperlyConfigured(msg % view_name)

    if url is None:
        return None

    # Wrap so consumers get both the URL and its display name.
    return Hyperlink(url, name) | Given a name, view name and kwargs, return the URL that hyperlinks to the object.
May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
attributes are not configured to correctly match the URL conf. |
def attach(self, instance_id, device):
    """
    Attach this EBS volume to an EC2 instance.

    :type instance_id: str
    :param instance_id: The ID of the EC2 instance to which it will
                        be attached.

    :type device: str
    :param device: The device on the instance through which the
                   volume will be exposed (e.g. /dev/sdh)

    :rtype: bool
    :return: True if successful
    """
    # The volume only knows its own id; the connection performs the call.
    connection = self.connection
    return connection.attach_volume(self.id, instance_id, device)
:type instance_id: str
:param instance_id: The ID of the EC2 instance to which it will
be attached.
:type device: str
:param device: The device on the instance through which the
volume will be exposed (e.g. /dev/sdh)
:rtype: bool
:return: True if successful |
def register_agent(self, host, sweep_id=None, project_name=None):
    """Register a new agent

    Args:
        host (str): hostname
        sweep_id (str): sweep id
        project_name: (str): model that contains sweep; defaults to the
            'project' setting when not given.

    Returns:
        The created agent dict (contains its ``id``).
    """
    mutation = gql('''
    mutation CreateAgent(
        $host: String!
        $projectName: String!,
        $entityName: String!,
        $sweep: String!
    ) {
        createAgent(input: {
            host: $host,
            projectName: $projectName,
            entityName: $entityName,
            sweep: $sweep,
        }) {
            agent {
                id
            }
        }
    }
    ''')
    if project_name is None:
        project_name = self.settings('project')

    # don't retry on validation errors
    def no_retry_400(e):
        # Anything other than an HTTP 400 is considered retryable;
        # a 400 carries a server-side validation message that is
        # surfaced to the user as a UsageError instead.
        if not isinstance(e, requests.HTTPError):
            return True
        if e.response.status_code != 400:
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])

    response = self.gql(mutation, variable_values={
        'host': host,
        'entityName': self.settings("entity"),
        'projectName': project_name,
        'sweep': sweep_id}, check_retry_fn=no_retry_400)
    return response['createAgent']['agent'] | Register a new agent
Args:
host (str): hostname
persistent (bool): long running or oneoff
sweep (str): sweep id
project_name: (str): model that contains sweep |
def activate(self, profile_name=NotSet):
    """
    Sets <PROFILE_ROOT>_PROFILE environment variable to the name of the current profile.

    :param profile_name: name of the profile to activate; defaults to
        this instance's current ``profile_name``.
    """
    # NOTE(review): despite the docstring, this method only records the
    # active profile name on the instance -- no environment variable is
    # written here. Confirm whether _active_profile_name is exported to
    # the environment elsewhere, or the docstring is stale.
    if profile_name is NotSet:
        # Fall back to the instance's configured profile.
        profile_name = self.profile_name
    self._active_profile_name = profile_name | Sets <PROFILE_ROOT>_PROFILE environment variable to the name of the current profile.
def _relation(self, id, join_on, join_to, level=None, featuretype=None,
              order_by=None, reverse=False, completely_within=False,
              limit=None):

    # The following docstring will be included in the parents() and
    # children() docstrings to maintain consistency, since they both
    # delegate to this method.

    """
    Parameters
    ----------

    id : string or a Feature object

    level : None or int

        If `level=None` (default), then return all children regardless
        of level.  If `level` is an integer, then constrain to just that
        level.
    {_method_doc}

    Returns
    -------
    A generator object that yields :class:`Feature` objects.
    """
    if isinstance(id, Feature):
        id = id.id

    # Join the relations table in whichever direction (parent->child or
    # child->parent) the caller selected via join_on / join_to.
    other = '''
    JOIN relations
    ON relations.{join_on} = features.id
    WHERE relations.{join_to} = ?
    '''.format(**locals())
    args = [id]

    level_clause = ''
    if level is not None:
        level_clause = 'relations.level = ?'
        args.append(level)

    query, args = helpers.make_query(
        args=args,
        other=other,
        extra=level_clause,
        featuretype=featuretype,
        order_by=order_by,
        reverse=reverse,
        limit=limit,
        completely_within=completely_within,
    )

    # modify _SELECT so that only unique results are returned
    query = query.replace("SELECT", "SELECT DISTINCT")
    for i in self._execute(query, args):
        yield self._feature_returner(**i) | Parameters
----------
id : string or a Feature object
level : None or int
If `level=None` (default), then return all children regardless
of level. If `level` is an integer, then constrain to just that
level.
{_method_doc}
Returns
-------
A generator object that yields :class:`Feature` objects. |
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time | For limiting how often frames are computed. |
def scale(config=None, name=None, replicas=None):
    """
    Scales the number of pods in the specified K8sReplicationController to the desired replica count.

    :param config: an instance of K8sConfig
    :param name: the name of the ReplicationController we want to scale.
    :param replicas: the desired number of replicas.
    :return: An instance of K8sReplicationController
    """
    controller = K8sReplicationController(config=config, name=name).get()
    controller.desired_replicas = replicas
    controller.update()
    # Block until the cluster reports the requested replica count.
    controller._wait_for_desired_replicas()
    return controller
:param config: an instance of K8sConfig
:param name: the name of the ReplicationController we want to scale.
:param replicas: the desired number of replicas.
:return: An instance of K8sReplicationController |
def image_exists(self, id=None, tag=None):
    """
    Check if specified image exists
    """
    # An image matches if it can be located by id or, failing that, by tag.
    if id and self.image_by_id(id):
        return True
    if tag and self.image_by_tag(tag):
        return True
    return False
def iter_transport_opts(opts):
    '''
    Yield transport, opts for all master configured transports

    :param opts: master configuration dict; may contain a
        ``transport_opts`` mapping of transport name -> option overrides.
    :returns: generator of ``(transport, opts)`` pairs -- one per entry in
        ``transport_opts`` (with overrides merged onto a copy of ``opts``),
        plus the primary ``opts['transport']`` with the original ``opts``
        if it was not already covered.
    '''
    transports = set()
    # dict.items() iterates fine on both Python 2 and 3, so the six
    # helper (six.iteritems) is unnecessary here.
    for transport, opts_overrides in opts.get('transport_opts', {}).items():
        # Shallow-copy so the caller's opts are never mutated.
        t_opts = dict(opts)
        t_opts.update(opts_overrides)
        t_opts['transport'] = transport
        transports.add(transport)
        yield transport, t_opts

    if opts['transport'] not in transports:
        yield opts['transport'], opts
def date_time_this_month(
        self,
        before_now=True,
        after_now=False,
        tzinfo=None):
    """
    Gets a DateTime object for the current month.

    :param before_now: include days in current month before today
    :param after_now: include days in current month after today
    :param tzinfo: timezone, instance of datetime.tzinfo subclass
    :example DateTime('2012-04-04 11:02:02')
    :return DateTime
    """
    now = datetime.now(tzinfo)
    # Midnight on the 1st of the current month.
    this_month_start = now.replace(
        day=1, hour=0, minute=0, second=0, microsecond=0)
    next_month_start = this_month_start + \
        relativedelta.relativedelta(months=1)
    if before_now and after_now:
        return self.date_time_between_dates(
            this_month_start, next_month_start, tzinfo)
    elif not before_now and after_now:
        return self.date_time_between_dates(now, next_month_start, tzinfo)
    elif not after_now and before_now:
        return self.date_time_between_dates(this_month_start, now, tzinfo)
    else:
        # Neither side allowed: degenerate case, just return "now".
        return now | Gets a DateTime object for the current month.
:param before_now: include days in current month before today
:param after_now: include days in current month after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime |
async def register_user(self, password, **kwds):
    """
    Create a remote user with a matching local password record, and
    provide a sessionToken for later requests.

    Args:
        password (str): the password to store for the new user.
        **kwds: additional fields forwarded to the remote user service.

    Returns:
        dict: ``{'user': <user dict>, 'sessionToken': <token>}``

    Raises:
        RuntimeError: if a password record already exists for the user.
    """
    # so make one
    user = await self._create_remote_user(password=password, **kwds)
    # if there is no pk field
    if 'pk' not in user:
        # make sure the user has a pk field
        user['pk'] = user['id']

    # the query to find a matching record
    match_query = self.model.user == user['id']

    # if the user has already been registered
    if self.model.select().where(match_query).count() > 0:
        # yell loudly
        raise RuntimeError('The user is already registered.')

    # create an entry in the user password table; use a distinct name so
    # the `password` argument is not shadowed by the model instance.
    password_record = self.model(user=user['id'], password=password)
    # save it to the database
    password_record.save()

    # return a dictionary with the user we created and a session token for later use
    return {
        'user': user,
        'sessionToken': self._user_session_token(user)
    }
Args:
uid (str): The |
def missing_some(data, min_required, args):
    """Implements the missing_some operator for finding missing variables."""
    if min_required < 1:
        return []
    sentinel = object()
    missing = []
    found_count = 0
    for name in args:
        if get_var(data, name, sentinel) is sentinel:
            missing.append(name)
        else:
            found_count += 1
            # Enough variables are present: nothing counts as missing.
            if found_count >= min_required:
                return []
    return missing
def _build(self, one_hot_input_sequence):
    """Builds the deep LSTM model sub-graph.

    Args:
      one_hot_input_sequence: A Tensor with the input sequence encoded as a
        one-hot representation. Its dimensions should be `[truncation_length,
        batch_size, output_size]`.

    Returns:
      Tuple of the Tensor of output logits for the batch, with dimensions
      `[truncation_length, batch_size, output_size]`, and the
      final state of the unrolled core.
    """
    input_shape = one_hot_input_sequence.get_shape()
    batch_size = input_shape[1]

    # Apply the embedding to every timestep at once.
    batch_embed_module = snt.BatchApply(self._embed_module)
    input_sequence = batch_embed_module(one_hot_input_sequence)
    input_sequence = tf.nn.relu(input_sequence)

    initial_state = self._core.initial_state(batch_size)

    # Either graph-dynamic or statically-unrolled RNN, selected at
    # construction time via self._use_dynamic_rnn.
    if self._use_dynamic_rnn:
        output_sequence, final_state = tf.nn.dynamic_rnn(
            cell=self._core,
            inputs=input_sequence,
            time_major=True,
            initial_state=initial_state)
    else:
        rnn_input_sequence = tf.unstack(input_sequence)
        output, final_state = tf.contrib.rnn.static_rnn(
            cell=self._core,
            inputs=rnn_input_sequence,
            initial_state=initial_state)
        output_sequence = tf.stack(output)

    batch_output_module = snt.BatchApply(self._output_module)
    output_sequence_logits = batch_output_module(output_sequence)

    return output_sequence_logits, final_state | Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimensions
`[truncation_length, batch_size, output_size]`, and the
final state of the unrolled core. |
def get_param_type_indexes(self, data, name=None, prev=None):
    """Get from a docstring a parameter type indexes.
    In javadoc style it is after @type.

    :param data: string to parse
    :param name: the name of the parameter (Default value = None)
    :param prev: index after the previous element (param or param's description) (Default value = None)
    :returns: start and end indexes of found element else (-1, -1)
        Note: the end index is the index after the last included character or -1 if
        reached the end
    :rtype: tuple
    """
    start, end = -1, -1
    stl_type = self.opt['type'][self.style['in']]['name']
    if not prev:
        # No anchor supplied: start searching after the description.
        _, prev = self.get_param_description_indexes(data)
    if prev >= 0:
        if self.style['in'] in self.tagstyles + ['unknown']:
            idx = self.get_elem_index(data[prev:])
            if idx >= 0 and data[prev + idx:].startswith(stl_type):
                idx = prev + idx + len(stl_type)
                # Expect "<param name> <type description>" after the tag.
                m = re.match(r'\W*(\w+)\W+(\w+)\W*', data[idx:].strip())
                if m:
                    param = m.group(1).strip()
                    if (name and param == name) or not name:
                        desc = m.group(2)
                        start = data[idx:].find(desc) + idx
                        end = self.get_elem_index(data[start:])
                        if end >= 0:
                            # Convert relative offset to absolute index.
                            end += start

    if self.style['in'] in ['params', 'unknown'] and (start, end) == (-1, -1):
        # TODO: manage this
        pass

    return (start, end) | Get from a docstring a parameter type indexes.
In javadoc style it is after @type.
:param data: string to parse
:param name: the name of the parameter (Default value = None)
:param prev: index after the previous element (param or param's description) (Default value = None)
:returns: start and end indexes of found element else (-1, -1)
Note: the end index is the index after the last included character or -1 if
reached the end
:rtype: tuple |
def lifter(cepstra, L=22):
    """Apply a cepstral lifter to the matrix of cepstra. This has the effect of increasing the
    magnitude of the high frequency DCT coeffs.

    :param cepstra: the matrix of mel-cepstra, will be numframes * numcep in size.
    :param L: the liftering coefficient to use. Default is 22. L <= 0 disables lifter.
    :returns: the liftered cepstra, same shape as the input.
    """
    if L > 0:
        # Only the coefficient count matters; the frame count broadcasts.
        _, ncoeff = numpy.shape(cepstra)
        n = numpy.arange(ncoeff)
        # Sinusoidal lift window applied identically to every frame.
        lift = 1 + (L / 2.) * numpy.sin(numpy.pi * n / L)
        return lift * cepstra
    else:
        # values of L <= 0, do nothing
        return cepstra
magnitude of the high frequency DCT coeffs.
:param cepstra: the matrix of mel-cepstra, will be numframes * numcep in size.
:param L: the liftering coefficient to use. Default is 22. L <= 0 disables lifter. |
def style(self):
    """Function to apply some styles to the layers.

    Classifies hazard-class layers with a generated legend, and gives the
    aggregation and analysis layers simple outline-only symbology; every
    output layer's style is then saved as its default (QML).
    """
    LOGGER.info('ANALYSIS : Styling')
    classes = generate_classified_legend(
        self.analysis_impacted,
        self.exposure,
        self.hazard,
        self.use_rounding,
        self.debug_mode)

    # Let's style layers which have a geometry and have hazard_class
    hazard_class = hazard_class_field['key']
    for layer in self._outputs():
        without_geometries = [
            QgsWkbTypes.NullGeometry,
            QgsWkbTypes.UnknownGeometry]
        if layer.geometryType() not in without_geometries:
            # Only the impact layer (or everything in debug mode) shows
            # the "not exposed" class.
            display_not_exposed = False
            if layer == self.impact or self.debug_mode:
                display_not_exposed = True

            if layer.keywords['inasafe_fields'].get(hazard_class):
                hazard_class_style(layer, classes, display_not_exposed)

    # Let's style the aggregation and analysis layer.
    simple_polygon_without_brush(
        self.aggregation_summary, aggregation_width, aggregation_color)
    simple_polygon_without_brush(
        self.analysis_impacted, analysis_width, analysis_color)

    # Styling is finished, save them as QML
    for layer in self._outputs():
        layer.saveDefaultStyle() | Function to apply some styles to the layers.
def find_frametype(self, gpstime=None, frametype_match=None,
                   host=None, port=None, return_all=False,
                   allow_tape=True):
    """Find the containing frametype(s) for this `Channel`

    Parameters
    ----------
    gpstime : `int`
        a reference GPS time at which to search for frame files

    frametype_match : `str`
        a regular expression string to use to down-select from the
        list of all available frametypes

    host : `str`
        the name of the datafind server to use for frame file discovery

    port : `int`
        the port of the datafind server on the given host

    return_all : `bool`, default: `False`
        return all matched frame types, otherwise only the first match
        is returned

    allow_tape : `bool`, default: `True`
        include frame files on (slow) magnetic tape in the search

    Returns
    -------
    frametype : `str`, `list`
        the first matching frametype containing this channel
        (`return_all=False`), or a `list` of all matches
    """
    # Gather the search options once, then delegate the actual frame
    # discovery to the datafind module.
    search_options = dict(
        gpstime=gpstime,
        frametype_match=frametype_match,
        host=host,
        port=port,
        return_all=return_all,
        allow_tape=allow_tape,
    )
    return datafind.find_frametype(self, **search_options)
Parameters
----------
gpstime : `int`
a reference GPS time at which to search for frame files
frametype_match : `str`
a regular expression string to use to down-select from the
list of all available frametypes
host : `str`
the name of the datafind server to use for frame file discovery
port : `int`
the port of the datafind server on the given host
return_all: `bool`, default: `False`
return all matched frame types, otherwise only the first match is
returned
allow_tape : `bool`, default: `True`
include frame files on (slow) magnetic tape in the search
Returns
-------
frametype : `str`, `list`
the first matching frametype containing the this channel
(`return_all=False`, or a `list` of all matches |
def split(self, length, vertical=True):
    """
    Returns two bounding boxes representing the current
    bounds split into two smaller boxes.

    Parameters
    -------------
    length: float, length to split
    vertical: bool, if True will split box vertically

    Returns
    -------------
    box: (2,4) float, two bounding boxes consisting of:
         [minx, miny, maxx, maxy]
    """
    minx, miny, maxx, maxy = self.bounds
    if vertical:
        # Cut with a vertical line at minx + length.
        cut = minx + length
        return [[minx, miny, cut, maxy],
                [cut, miny, maxx, maxy]]
    # Cut with a horizontal line at miny + length.
    cut = miny + length
    return [[minx, miny, maxx, cut],
            [minx, cut, maxx, maxy]]
bounds split into two smaller boxes.
Parameters
-------------
length: float, length to split
vertical: bool, if True will split box vertically
Returns
-------------
box: (2,4) float, two bounding boxes consisting of:
[minx, miny, maxx, maxy] |
def wait_for_response(client, timeout, path='/', expected_status_code=None):
    """
    Try make a GET request with an HTTP client against a certain path and
    return once any response has been received, ignoring any errors.

    :param ContainerHttpClient client:
        The HTTP client to use to connect to the container.
    :param timeout:
        Timeout value in seconds.
    :param path:
        HTTP path to request.
    :param int expected_status_code:
        If set, wait until a response with this status code is received. If not
        set, the status code will not be checked.

    :raises TimeoutError:
        If a request fails to be made within the timeout period.
    """
    # We want time.monotonic on Pythons that have it, otherwise time.time will
    # have to do.
    get_time = getattr(time, 'monotonic', time.time)

    deadline = get_time() + timeout
    while True:
        try:
            # Don't care what the response is, as long as we get one
            # (per-request timeout is clamped to the remaining budget).
            time_left = deadline - get_time()
            response = client.get(
                path, timeout=max(time_left, 0.001), allow_redirects=False)
            if (expected_status_code is None
                    or response.status_code == expected_status_code):
                return
        except requests.exceptions.Timeout:
            # Requests timed out, our time must be up
            break
        except Exception:
            # Ignore other exceptions
            pass
        if get_time() >= deadline:
            break
        # Brief pause so we don't hammer a service that is still booting.
        time.sleep(0.1)

    raise TimeoutError('Timeout waiting for HTTP response.') | Try make a GET request with an HTTP client against a certain path and
return once any response has been received, ignoring any errors.
:param ContainerHttpClient client:
The HTTP client to use to connect to the container.
:param timeout:
Timeout value in seconds.
:param path:
HTTP path to request.
:param int expected_status_code:
If set, wait until a response with this status code is received. If not
set, the status code will not be checked.
:raises TimeoutError:
If a request fails to be made within the timeout period. |
def call(self, task, decorators=None):
    """
    Call given task on service layer.

    :param task: task to be called. task will be decorated with
        TaskDecorator's contained in 'decorators' list
    :type task: instance of Task class
    :param decorators: list of TaskDecorator's / TaskResultDecorator's
        inherited classes
    :type decorators: list

    :return task_result: result of task call decorated with TaskResultDecorator's
        contained in 'decorators' list
    :rtype TaskResult instance
    """
    if decorators is None:
        decorators = []

    # Decorate the task, extract its payload, invoke the service, then
    # decorate the raw result on the way back out.
    decorated_task = self.apply_task_decorators(task, decorators)
    payload = decorated_task.get_data()
    task_name = decorated_task.get_name()
    raw_result = self._inner_call(task_name, payload)
    task_result = RawTaskResult(decorated_task, raw_result)
    return self.apply_task_result_decorators(task_result, decorators)
:param task: task to be called. task will be decorated with
TaskDecorator's contained in 'decorators' list
:type task: instance of Task class
:param decorators: list of TaskDecorator's / TaskResultDecorator's
inherited classes
:type decorators: list
:return task_result: result of task call decorated with TaskResultDecorator's
contained in 'decorators' list
:rtype TaskResult instance |
def parse_metric_family(buf):
    """
    Parse the binary buffer in input, searching for Prometheus messages
    of type MetricFamily [0] delimited by a varint32 [1].

    [0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501
    [1] https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractMessageLite#writeDelimitedTo(java.io.OutputStream) # noqa: E501
    """
    pos = 0
    total = len(buf)
    while pos < total:
        # Each message is preceded by its byte length as a varint32.
        msg_len, pos = _DecodeVarint32(buf, pos)
        payload = buf[pos:pos + msg_len]
        pos += msg_len
        family = metrics_pb2.MetricFamily()
        family.ParseFromString(payload)
        yield family
of type MetricFamily [0] delimited by a varint32 [1].
[0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501
[1] https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractMessageLite#writeDelimitedTo(java.io.OutputStream) # noqa: E501 |
def add_matches(self):
    """Adds all regular expressions declared in
    guake.globals.TERMINAL_MATCH_EXPRS to the terminal to make vte
    highlight text that matches them.

    Tries the modern Vte.Regex (PCRE2) API first, then falls back to the
    deprecated GLib.Regex API for older VTE versions; if both fail, the
    Ctrl+click features are disabled and an error is logged.
    """
    try:
        # NOTE: PCRE2_UTF | PCRE2_NO_UTF_CHECK | PCRE2_MULTILINE
        # reference from vte/bindings/vala/app.vala, flags = 0x40080400u
        # also ref: https://mail.gnome.org/archives/commits-list/2016-September/msg06218.html
        VTE_REGEX_FLAGS = 0x40080400
        for expr in TERMINAL_MATCH_EXPRS:
            tag = self.match_add_regex(
                Vte.Regex.new_for_match(expr, len(expr), VTE_REGEX_FLAGS), 0
            )
            self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)

        for _useless, match, _otheruseless in QUICK_OPEN_MATCHERS:
            tag = self.match_add_regex(
                Vte.Regex.new_for_match(match, len(match), VTE_REGEX_FLAGS), 0
            )
            self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
    except (GLib.Error, AttributeError) as e:  # pylint: disable=catching-non-exception
        # Fallback path: PCRE2/Vte.Regex unavailable, use GLib.Regex.
        try:
            compile_flag = 0
            if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) >= (0, 44):
                compile_flag = GLib.RegexCompileFlags.MULTILINE
            for expr in TERMINAL_MATCH_EXPRS:
                tag = self.match_add_gregex(GLib.Regex.new(expr, compile_flag, 0), 0)
                self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)

            for _useless, match, _otheruseless in QUICK_OPEN_MATCHERS:
                tag = self.match_add_gregex(GLib.Regex.new(match, compile_flag, 0), 0)
                self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
        except GLib.Error as e:  # pylint: disable=catching-non-exception
            log.error(
                "ERROR: PCRE2 does not seems to be enabled on your system. "
                "Quick Edit and other Ctrl+click features are disabled. "
                "Please update your VTE package or contact your distribution to ask "
                "to enable regular expression support in VTE. Exception: '%s'", str(e)
            ) | Adds all regular expressions declared in
guake.globals.TERMINAL_MATCH_EXPRS to the terminal to make vte
highlight text that matches them. |
def south_field_triple(self):
    """
    Returns a suitable description of this field for South.

    :returns: a ``(field_class_path, args, kwargs)`` triple as expected by
        South migrations, derived from the wrapped translated field.
    """
    # We'll just introspect the _actual_ field.
    from south.modelsinspector import introspector
    try:
        # Check if the field provides its own 'field_class':
        field_class = self.translated_field.south_field_triple()[0]
    except AttributeError:
        # Fall back to the wrapped field's dotted class path.
        field_class = '%s.%s' % (self.translated_field.__class__.__module__,
                                 self.translated_field.__class__.__name__)
    args, kwargs = introspector(self)
    # That's our definition!
    return (field_class, args, kwargs) | Returns a suitable description of this field for South.
def get_thellier_gui_meas_mapping(input_df, output=2):
    """
    Get the appropriate mapping for translating measurements in Thellier GUI.
    This requires special handling for treat_step_num/measurement/measurement_number.

    Parameters
    ----------
    input_df : pandas DataFrame
        MagIC records
    output : int
        output to this MagIC data model (2 or 3)

    Output
    --------
    mapping : dict (used in convert_meas_df_thellier_gui)
    """
    cols = input_df.columns
    if int(output) == 2:
        # 3 --> 2
        mapping = meas_magic3_2_magic2_map.copy()
        if 'treat_step_num' in cols:
            mapping.update({'treat_step_num': 'measurement_number'})
            mapping.pop('measurement')
        return mapping
    # 2 --> 3
    mapping = meas_magic2_2_magic3_map.copy()
    if 'measurement' in cols:
        mapping.pop('measurement_number')
        try:
            # Small integer measurement numbers are step indices, not
            # measurement names, so map them to treat_step_num.
            if int(input_df.iloc[0]['measurement_number']) < 100:
                mapping['measurement_number'] = 'treat_step_num'
        except ValueError:
            pass
    return mapping
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui) |
def parse_import_directory(self, rva, size):
    """Walk and parse the import directory.

    Iterates IMAGE_IMPORT_DESCRIPTOR structures starting at *rva* until the
    all-zero terminator descriptor is found, resolving each descriptor's
    imported symbols and DLL name.  Parse failures are recorded as warnings
    and stop the walk rather than raising.

    Args:
        rva: relative virtual address of the import directory.
        size: size of the directory entry; not used here — parsing stops at
            the zero terminator instead.  TODO confirm intentionally unused.

    Returns:
        list of ImportDescData entries, one per imported DLL.
    """
    import_descs = []
    while True:
        try:
            # If the RVA is invalid all would blow up. Some EXEs seem to be
            # specially nasty and have an invalid RVA.
            data = self.get_data(rva, Structure(self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof() )
        except PEFormatError, e:
            self.__warnings.append(
                'Error parsing the import directory at RVA: 0x%x' % ( rva ) )
            break
        import_desc = self.__unpack_data__(
            self.__IMAGE_IMPORT_DESCRIPTOR_format__,
            data, file_offset = self.get_offset_from_rva(rva) )
        # If the structure is all zeroes, we reached the end of the list
        if not import_desc or import_desc.all_zeroes():
            break
        # Advance to the next descriptor in the table.
        rva += import_desc.sizeof()
        try:
            import_data = self.parse_imports(
                import_desc.OriginalFirstThunk,
                import_desc.FirstThunk,
                import_desc.ForwarderChain)
        except PEFormatError, excp:
            self.__warnings.append(
                'Error parsing the import directory. ' +
                'Invalid Import data at RVA: 0x%x (%s)' % ( rva, str(excp) ) )
            break
        #raise excp
        # Descriptors with no resolvable imports are skipped entirely.
        if not import_data:
            continue
        dll = self.get_string_at_rva(import_desc.Name)
        # Replace names that are not valid DOS filenames so malformed data
        # is not propagated to consumers.
        if not is_valid_dos_filename(dll):
            dll = '*invalid*'
        if dll:
            import_descs.append(
                ImportDescData(
                    struct = import_desc,
                    imports = import_data,
                    dll = dll))
    # Heuristic: an import table that contains the LoadLibrary/GetProcAddress
    # pair and only a handful of symbols overall is typical of packers.
    suspicious_imports = set([ 'LoadLibrary', 'GetProcAddress' ])
    suspicious_imports_count = 0
    total_symbols = 0
    for imp_dll in import_descs:
        for symbol in imp_dll.imports:
            for suspicious_symbol in suspicious_imports:
                if symbol and symbol.name and symbol.name.startswith( suspicious_symbol ):
                    suspicious_imports_count += 1
                    break
            total_symbols += 1
    if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
        self.__warnings.append(
            'Imported symbols contain entries typical of packed executables.' )
    return import_descs
def _succeed(self, request_id, reply, duration):
"""Publish a CommandSucceededEvent."""
self.listeners.publish_command_success(
duration, reply, self.name,
request_id, self.sock_info.address, self.op_id) | Publish a CommandSucceededEvent. |
async def get_resource(self, resource_id: int) -> dict:
    """Fetch a single resource by id.

    :raises PvApiError when a hub connection error occurs.
    """
    url = join_path(self._base_path, str(resource_id))
    resource = await self.request.get(url)
    # Sanitize the payload in place before handing it back.
    self._sanitize_resource(self._get_to_actual_data(resource))
    return resource
:raises PvApiError when a hub connection error occurs. |
def _get_item(self, package, flavor):
"""Returns the item for ordering a dedicated host."""
for item in package['items']:
if item['keyName'] == flavor:
return item
raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor) | Returns the item for ordering a dedicated host. |
def apply_T7(word):
    '''If a VVV-sequence does not contain a potential /i/-final diphthong,
    there is a syllable boundary between the second and third vowels, e.g.
    [kau.an], [leu.an], [kiu.as].'''
    WORD = _split_consonants_and_vowels(word)
    for key, segment in WORD.iteritems():
        # Three-vowel run: split between the 2nd and 3rd vowels.
        if len(segment) == 3 and is_vowel(segment[0]):
            WORD[key] = segment[:2] + '.' + segment[2:]
    return _compile_dict_into_word(WORD)
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as]. |
def make_target(url, extra_opts=None):
    """Factory that creates `_Target` objects from URLs.

    FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.

    Note:
        TLS is only supported on Python 2.7/3.2+.

    Args:
        url (str):
        extra_opts (dict, optional): Passed to Target constructor. Default: None.

    Returns:
        :class:`_Target`
    """
    parts = compat.urlparse(url, allow_fragments=False)
    # scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986
    scheme = parts.scheme.lower()
    if scheme not in ("ftp", "ftps"):
        return FsTarget(url, extra_opts)
    # Import lazily so plain filesystem targets need no FTP machinery.
    from ftpsync import ftp_target
    return ftp_target.FtpTarget(
        parts.path,
        parts.hostname,
        parts.port,
        username=parts.username,
        password=parts.password,
        tls=(scheme == "ftps"),
        timeout=None,
        extra_opts=extra_opts,
    )
FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.
Note:
TLS is only supported on Python 2.7/3.2+.
Args:
url (str):
extra_opts (dict, optional): Passed to Target constructor. Default: None.
Returns:
:class:`_Target` |
def delete(self, save=True):
    """
    Deletes the original, plus any thumbnails. Fails silently if there
    are errors deleting the thumbnails.

    :param save: passed through to the parent ``delete`` (whether to save
        the owning model instance after deletion).
    """
    for thumb_name, _thumb_options in self.field.thumbs:
        thumb_filename = self._calc_thumb_filename(thumb_name)
        try:
            self.storage.delete(thumb_filename)
        except Exception:
            # The docstring promises silent failure: a missing or
            # undeletable thumbnail must not block deleting the original.
            pass
    super(ImageWithThumbsFieldFile, self).delete(save)
are errors deleting the thumbnails. |
def create(self):
    """
    Creates the current document in the remote database and if successful,
    updates the locally cached Document object with the ``_id``
    and ``_rev`` returned as part of the successful response.

    :raises requests.HTTPError: if the server rejects the POST.
    """
    # Work on a copy and drop any stale ``_rev`` so an existing document
    # will not be "updated" by this create.
    doc = dict(self)
    if doc.get('_rev') is not None:
        del doc['_rev']
    headers = {'Content-Type': 'application/json'}
    resp = self.r_session.post(
        self._database.database_url,
        headers=headers,
        data=json.dumps(doc, cls=self.encoder)
    )
    resp.raise_for_status()
    data = response_to_json_dict(resp)
    # Bypass any Document.__setitem__ overrides when recording the
    # server-assigned id and revision.
    super(Document, self).__setitem__('_id', data['id'])
    super(Document, self).__setitem__('_rev', data['rev'])
updates the locally cached Document object with the ``_id``
and ``_rev`` returned as part of the successful response. |
def _get_register_specs(bit_labels):
"""Get the number and size of unique registers from bit_labels list.
Args:
bit_labels (list): this list is of the form::
[['reg1', 0], ['reg1', 1], ['reg2', 0]]
which indicates a register named "reg1" of size 2
and a register named "reg2" of size 1. This is the
format of classic and quantum bit labels in qobj
header.
Yields:
tuple: iterator of register_name:size pairs.
"""
it = itertools.groupby(bit_labels, operator.itemgetter(0))
for register_name, sub_it in it:
yield register_name, max(ind[1] for ind in sub_it) + 1 | Get the number and size of unique registers from bit_labels list.
Args:
bit_labels (list): this list is of the form::
[['reg1', 0], ['reg1', 1], ['reg2', 0]]
which indicates a register named "reg1" of size 2
and a register named "reg2" of size 1. This is the
format of classic and quantum bit labels in qobj
header.
Yields:
tuple: iterator of register_name:size pairs. |
def _render_log():
    """Totally tap into Towncrier internals to get an in-memory result."""
    config = load_config(ROOT)
    definitions = config['types']
    fragment_dir = pathlib.Path(config['directory']).absolute()
    fragments, _fragment_filenames = find_fragments(
        fragment_dir,
        config['sections'],
        None,
        definitions,
    )
    template = pathlib.Path(config['template']).read_text(encoding='utf-8')
    return render_fragments(
        template,
        config['issue_format'],
        split_fragments(fragments, definitions),
        definitions,
        config['underlines'][1:],
    )
def sort_against(list1, list2, reverse=False):
    """
    Arrange items of list1 in the same order as sorted(list2).

    In other words, apply to list1 the permutation which takes list2
    to sorted(list2, reverse).

    Falls back to returning list1 unchanged when list2's items cannot
    be compared (e.g. mixed or unorderable types).
    """
    try:
        return [item for _, item in
                sorted(zip(list2, list1), key=lambda x: x[0], reverse=reverse)]
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the best-effort fallback.
        return list1
In other words, apply to list1 the permutation which takes list2
to sorted(list2, reverse). |
def copy(self, memo=None, which=None):
    """
    Returns a (deep) copy of the current parameter handle.

    All connections to parents of the copy will be cut.

    :param dict memo: memo for deepcopy
    :param Parameterized which: parameterized object which started the copy process [default: self]
    """
    # Import under an alias: the original bound the deepcopy result to a
    # local named ``copy``, shadowing both the module and this method.
    import copy as copy_module
    if memo is None:
        memo = {}
    # Pre-seed the memo so deepcopy replaces parents (and the gradient and
    # fixes, which belong to the old hierarchy) with None instead of
    # copying them.
    parents = []
    if which is None:
        which = self
    which.traverse_parents(parents.append)  # collect parents
    for p in parents:
        if id(p) not in memo:
            memo[id(p)] = None  # set all parents to None, so they will not be copied
    if id(self.gradient) not in memo:
        memo[id(self.gradient)] = None  # reset the gradient
    if id(self._fixes_) not in memo:
        memo[id(self._fixes_)] = None  # fixes have to be reset, as this is now highest parent
    duplicate = copy_module.deepcopy(self, memo)  # and start the copy
    duplicate._parent_index_ = None
    duplicate._trigger_params_changed()
    return duplicate
All connections to parents of the copy will be cut.
:param dict memo: memo for deepcopy
:param Parameterized which: parameterized object which started the copy process [default: self] |
def _balance_braces(tokens, filename=None):
"""Raises syntax errors if braces aren't balanced"""
depth = 0
for token, line, quoted in tokens:
if token == '}' and not quoted:
depth -= 1
elif token == '{' and not quoted:
depth += 1
# raise error if we ever have more right braces than left
if depth < 0:
reason = 'unexpected "}"'
raise NgxParserSyntaxError(reason, filename, line)
else:
yield (token, line, quoted)
# raise error if we have less right braces than left at EOF
if depth > 0:
reason = 'unexpected end of file, expecting "}"'
raise NgxParserSyntaxError(reason, filename, line) | Raises syntax errors if braces aren't balanced |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.