code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def list_all_quantities(self, include_native=False, with_info=False):
    """
    Return a list of all available quantities in this catalog.

    If *include_native* is `True`, native quantities are included as well.
    If *with_info* is `True`, return a dict mapping each quantity name to
    its info dict instead of a plain list.

    See also: list_all_native_quantities
    """
    names = set(self._quantity_modifiers)
    if include_native:
        names |= set(self._native_quantities)
    if with_info:
        return {name: self.get_quantity_info(name) for name in names}
    return list(names)
def find_config(config_path: str) -> str:
    """
    Derive the configuration file path from the given path and check that it exists.

    The given path is expected to be either
    1. a path to the config file itself, or
    2. a path to a directory, in which case ``CXF_CONFIG_FILE`` is appended.

    :param config_path: path to the configuration file or its parent directory
    :return: validated configuration file path
    """
    candidate = config_path
    # A directory means "use the default config file name inside it".
    if path.isdir(candidate):
        candidate = path.join(candidate, CXF_CONFIG_FILE)
    assert path.exists(candidate), '`{}` does not exist'.format(candidate)
    return candidate
def create_essay_set(text, score, prompt_string, generate_additional=True):
    """
    Create an EssaySet from the given data.

    :param text: list of strings corresponding to essay text.
    :param score: list of scores where score[n] corresponds to text[n].
    :param prompt_string: string containing the essay prompt.
    :param generate_additional: whether to generate additional essays at the
        minimum score point or not.
    :return: populated EssaySet instance.
    """
    essay_set = EssaySet()
    # Hoist the minimum out of the loop: the original computed min(score)
    # once per essay, which is quadratic in the number of essays.
    min_score = min(score) if score else None
    for essay, essay_score in zip(text, score):
        essay_set.add_essay(essay, essay_score)
        if generate_additional and essay_score == min_score:
            # Seed the generator with the cleaned text of the essay just added.
            essay_set.generate_additional_essays(essay_set._clean_text[-1], essay_score)
    essay_set.update_prompt(prompt_string)
    return essay_set
def fetch(self, end=values.unset, start=values.unset):
    """
    Fetch a UsageInstance

    :param unicode end: The end
    :param unicode start: The start

    :returns: Fetched UsageInstance
    :rtype: twilio.rest.preview.wireless.sim.usage.UsageInstance
    """
    # Delegate to the proxy, which holds the actual HTTP context.
    return self._proxy.fetch(
        end=end,
        start=start,
    )
def adjust_frame(proc_obj, name, pos, absolute_pos):
    """Adjust the stack frame by ``pos`` positions.

    If ``absolute_pos`` is true, ``pos`` is an absolute frame number
    (a negative value indexes from the other end of the stack);
    otherwise it is relative to the current frame index.
    """
    if not proc_obj.curframe:
        proc_obj.errmsg("No stack.")
        return
    # Normalize pos into the prospective new value of proc_obj.curindex.
    if absolute_pos:
        pos = frame_num(proc_obj, pos) if pos >= 0 else -pos - 1
    else:
        pos += proc_obj.curindex
    if pos < 0:
        proc_obj.errmsg("Adjusting would put us beyond the oldest frame.")
        return
    if pos >= len(proc_obj.stack):
        proc_obj.errmsg("Adjusting would put us beyond the newest frame.")
        return
    proc_obj.curindex = pos
    proc_obj.curframe = proc_obj.stack[proc_obj.curindex][0]
    proc_obj.location()
    # Reset listing state so subsequent "list" commands show the new frame.
    proc_obj.list_lineno = None
    proc_obj.list_offset = proc_obj.curframe.f_lasti
    proc_obj.list_object = proc_obj.curframe
    proc_obj.list_filename = proc_obj.curframe.f_code.co_filename
    return
def update_team(self, slug):
    """
    Trigger update and cache invalidation for the team identified by the
    given `slug`, if any. Returns `True` if the update was successful,
    `False` otherwise.

    :param slug: GitHub 'slug' name for the team to be updated.
    """
    # self._org is created during Trac startup, so there should never be a
    # case where we try to update an org before it's created; this is a
    # sanity check only.
    if not self._org:
        return False
    if self._org.has_team(slug):
        return self._org.update_team(slug)
    # Unknown team: refresh the whole organization instead.
    return self._org.update()
def day_night_duration(
        self,
        daybreak: datetime.time = datetime.time(NORMAL_DAY_START_H),
        nightfall: datetime.time = datetime.time(NORMAL_DAY_END_H)) \
        -> Tuple[datetime.timedelta, datetime.timedelta]:
    """
    Return a ``(day, night)`` tuple of ``datetime.timedelta`` objects
    giving the duration of this interval that falls into day and night
    respectively.
    """
    day_total = datetime.timedelta()
    night_total = datetime.timedelta()
    first_date = self.start.date()
    last_date = self.end.date()
    n_days = (last_date - first_date).days + 1
    for offset in range(n_days):
        current = first_date + datetime.timedelta(days=offset)
        # The part of this interval that falls on the current calendar day.
        component = self.component_on_date(current)
        # The daytime window for that day.
        daytime = Interval.daytime(current, daybreak, nightfall)
        overlap = component.intersection(daytime)
        if overlap is None:
            night_total += component.duration()
        else:
            day_total += overlap.duration()
            night_total += component.duration() - overlap.duration()
    return day_total, night_total
def hdr_vals_for_overscan(root):
    """Retrieve header keyword values from RAW and SPT
    FITS files to pass on to :func:`check_oscntab` and
    :func:`check_overscan`.

    Parameters
    ----------
    root : str
        Rootname of the observation. Can be relative path
        to the file excluding the type of FITS file and
        extension, e.g., '/my/path/jxxxxxxxq'.

    Returns
    -------
    ccdamp : str
        Amplifiers used to read out the CCDs.
    xstart : int
        Starting column of the readout in detector coordinates.
    ystart : int
        Starting row of the readout in detector coordinates.
    xsize : int
        Number of columns in the readout.
    ysize : int
        Number of rows in the readout.
    """
    # Readout geometry keywords live in the SPT (support) file ...
    with fits.open(root + '_spt.fits') as hdulist:
        spt_header = hdulist[0].header
    # ... while the amplifier setting lives in the RAW science file.
    with fits.open(root + '_raw.fits') as hdulist:
        raw_header = hdulist[0].header
    return (raw_header['CCDAMP'],
            spt_header['SS_A1CRN'],
            spt_header['SS_A2CRN'],
            spt_header['SS_A1SZE'],
            spt_header['SS_A2SZE'])
def send_execute_request(self, socket, code, silent=True, subheader=None, ident=None):
    """Construct and send an execute request via a socket.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")
    # Defaults: an empty extra header.
    if subheader is None:
        subheader = {}
    # Validate arguments before touching the wire.
    if not isinstance(code, basestring):
        raise TypeError("code must be text, not %s" % type(code))
    if not isinstance(subheader, dict):
        raise TypeError("subheader must be dict, not %s" % type(subheader))
    content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
    msg = self.session.send(socket, "execute_request", content=content, ident=ident,
                            subheader=subheader)
    msg_id = msg['header']['msg_id']
    self.outstanding.add(msg_id)
    if ident:
        # Possibly routed to a specific engine.
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # Save for later, in case of engine death.
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()
    return msg
def sort_header(header_text):
    """Sort the @SQ (reference sequence) lines of a SAM header by
    chromosome name, leaving all other header lines in their original
    positions.

    :param header_text: full header text, lines separated by newlines
    :return: re-ordered header text, newline-terminated
    """
    lines = header_text.rstrip().split("\n")
    # Collect reference name -> length from the @SQ lines.
    # (Raw strings: '\S' is not a valid escape in a plain string literal.)
    seq_lengths = {}
    for line in lines:
        m = re.match(r'@SQ\tSN:(\S+)\tLN:(\S+)', line)
        if m:
            seq_lengths[m.group(1)] = m.group(2)
    output = ''
    emitted_sq = False
    for line in lines:
        if re.match(r'@SQ\tSN:', line):
            # Emit the whole sorted @SQ block in place of the first @SQ
            # line; subsequent @SQ lines are skipped (already emitted).
            if not emitted_sq:
                emitted_sq = True
                for name in sorted(seq_lengths):
                    output += "@SQ\tSN:" + name + "\tLN:" + str(seq_lengths[name]) + "\n"
        else:
            output += line.rstrip("\n") + "\n"
    return output
def logs_for_job(self, job_name, wait=False, poll=10):  # noqa: C901 - suppress complexity warning for this method
    """Display the logs for a given training job, optionally tailing them until the
    job is complete. If the output is a tty or a Jupyter cell, it will be color-coded
    based on which instance the log entry is from.

    Args:
        job_name (str): Name of the training job to display the logs for.
        wait (bool): Whether to keep looking for new log entries until the job completes (default: False).
        poll (int): The interval in seconds between polling for new log entries and job completion (default: 10).

    Raises:
        ValueError: If waiting and the training job fails.
    """
    description = self.sagemaker_client.describe_training_job(TrainingJobName=job_name)
    print(secondary_training_status_message(description, None), end='')
    instance_count = description['ResourceConfig']['InstanceCount']
    status = description['TrainingJobStatus']

    stream_names = []  # The list of log streams
    positions = {}  # The current position in each stream, map of stream name -> position

    # Increase retries allowed (from default of 4), as we don't want waiting for a training job
    # to be interrupted by a transient exception.
    config = botocore.config.Config(retries={'max_attempts': 15})
    client = self.boto_session.client('logs', config=config)
    log_group = '/aws/sagemaker/TrainingJobs'

    job_already_completed = True if status == 'Completed' or status == 'Failed' or status == 'Stopped' else False

    state = LogState.TAILING if wait and not job_already_completed else LogState.COMPLETE
    dot = False

    color_wrap = sagemaker.logs.ColorWrap()

    # The loop below implements a state machine that alternates between checking the job status and
    # reading whatever is available in the logs at this point. Note, that if we were called with
    # wait == False, we never check the job status.
    #
    # If wait == TRUE and job is not completed, the initial state is TAILING
    # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the job really is complete).
    #
    # The state table:
    #
    # STATE               ACTIONS                        CONDITION             NEW STATE
    # ----------------    ----------------               -----------------     ----------------
    # TAILING             Read logs, Pause, Get status   Job complete          JOB_COMPLETE
    #                                                    Else                  TAILING
    # JOB_COMPLETE        Read logs, Pause               Any                   COMPLETE
    # COMPLETE            Read logs, Exit                                      N/A
    #
    # Notes:
    # - The JOB_COMPLETE state forces us to do an extra pause and read any items that got to Cloudwatch after
    #   the job was marked complete.
    last_describe_job_call = time.time()
    last_description = description
    while True:
        if len(stream_names) < instance_count:
            # Log streams are created whenever a container starts writing to stdout/err, so this list
            # may be dynamic until we have a stream for every instance.
            try:
                streams = client.describe_log_streams(logGroupName=log_group, logStreamNamePrefix=job_name + '/',
                                                      orderBy='LogStreamName', limit=instance_count)
                stream_names = [s['logStreamName'] for s in streams['logStreams']]
                positions.update([(s, sagemaker.logs.Position(timestamp=0, skip=0))
                                  for s in stream_names if s not in positions])
            except ClientError as e:
                # On the very first training job run on an account, there's no log group until
                # the container starts logging, so ignore any errors thrown about that
                err = e.response.get('Error', {})
                if err.get('Code', None) != 'ResourceNotFoundException':
                    raise

        if len(stream_names) > 0:
            if dot:
                print('')
                dot = False
            for idx, event in sagemaker.logs.multi_stream_iter(client, log_group, stream_names, positions):
                color_wrap(idx, event['message'])
                # Advance the position past the event just printed; events sharing
                # a timestamp are distinguished by the skip counter.
                ts, count = positions[stream_names[idx]]
                if event['timestamp'] == ts:
                    positions[stream_names[idx]] = sagemaker.logs.Position(timestamp=ts, skip=count + 1)
                else:
                    positions[stream_names[idx]] = sagemaker.logs.Position(timestamp=event['timestamp'], skip=1)
        else:
            # No streams yet: show a progress dot instead of log lines.
            dot = True
            print('.', end='')
            sys.stdout.flush()
        if state == LogState.COMPLETE:
            break

        time.sleep(poll)

        if state == LogState.JOB_COMPLETE:
            state = LogState.COMPLETE
        elif time.time() - last_describe_job_call >= 30:
            # Re-check the job status at most every 30 seconds.
            description = self.sagemaker_client.describe_training_job(TrainingJobName=job_name)
            last_describe_job_call = time.time()

            if secondary_training_status_changed(description, last_description):
                print()
                print(secondary_training_status_message(description, last_description), end='')
                last_description = description

            status = description['TrainingJobStatus']

            if status == 'Completed' or status == 'Failed' or status == 'Stopped':
                print()
                state = LogState.JOB_COMPLETE

    if wait:
        self._check_job_status(job_name, description, 'TrainingJobStatus')
        if dot:
            print()
        # Customers are not billed for hardware provisioning, so billable time is less than total time
        billable_time = (description['TrainingEndTime'] - description['TrainingStartTime']) * instance_count
        print('Billable seconds:', int(billable_time.total_seconds()) + 1)
def deploy_docker(self, dockerfile_path, virtualbox_name='default'):
    ''' a method to deploy app to heroku using docker

    :param dockerfile_path: string with path to the Dockerfile for the build
        (the file itself must be named "Dockerfile")
    :param virtualbox_name: [optional] string with name of the docker virtualbox
    :return: True on successful deployment
    '''
    title = '%s.deploy_docker' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'dockerfile_path': dockerfile_path,
        'virtualbox_name': virtualbox_name
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # check app subdomain
    if not self.subdomain:
        raise Exception('You must access a subdomain before you can deploy to heroku. Try: %s.access()' % self.__class__.__name__)

    # import dependencies
    from os import path

    # validate docker client (raises if docker is not available)
    from labpack.platforms.docker import dockerClient
    dockerClient(virtualbox_name, self.verbose)

    # validate dockerfile
    if not path.exists(dockerfile_path):
        raise Exception('%s is not a valid path on local host.' % dockerfile_path)
    dockerfile_root, dockerfile_node = path.split(dockerfile_path)
    if dockerfile_node != 'Dockerfile':
        raise Exception('heroku requires a file called Dockerfile to deploy using Docker.')

    # validate container plugin; check core plugins first, then user plugins
    from os import devnull
    from subprocess import check_output
    self.printer('Checking heroku plugin requirements ... ', flush=True)
    sys_command = 'heroku plugins --core'
    heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
    if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
        sys_command = 'heroku plugins'
        heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
        if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
            self.printer('ERROR')
            raise Exception(
                'heroku container registry required. Upgrade heroku-cli.')
    self.printer('done.')

    # verify container login
    self.printer('Checking heroku container login ... ', flush=True)
    sys_command = 'heroku container:login'
    self._handle_command(sys_command)
    self.printer('done.')

    # verbosity
    self.printer('Building docker image ...')

    # build and release docker image on heroku
    sys_command = 'cd %s; heroku container:push web --app %s' % (dockerfile_root, self.subdomain)
    self._handle_command(sys_command, print_pipe=True)
    sys_command = 'cd %s; heroku container:release web --app %s' % (dockerfile_root, self.subdomain)
    self._handle_command(sys_command, print_pipe=True)

    self.printer('Deployment complete.')

    return True
def add_ruleclause_name(self, ns_name, rid) -> bool:
    """Attach a freshly created tree.Rule to ``ns_name``.

    :param ns_name: node whose ``parser_tree`` receives the rule
    :param rid: id of the node holding the rule name
    :return: always True
    """
    rule_name = self.value(rid)
    ns_name.parser_tree = parsing.Rule(rule_name)
    return True
def to_comm(self, light_request=False):
    '''
    Convert `self` to :class:`.Publication`.

    Args:
        light_request (bool, default False): if True, skip reading and
            base64-encoding the file data (``b64_data`` will be None).

    Returns:
        obj: :class:`.Publication` instance.
    '''
    data = None
    if not light_request:
        # Heavy part: read the whole file and base64-encode it.
        data = read_as_base64(self.file_pointer)

    return Publication(
        title=self.title,
        author=self.author,
        pub_year=self.pub_year,
        isbn=self.isbn,
        urnnbn=self.urnnbn,
        uuid=self.uuid,
        aleph_id=self.aleph_id,
        producent_id=self.producent_id,
        is_public=self.is_public,
        filename=self.filename,
        is_periodical=self.is_periodical,
        path=self.path,
        b64_data=data,
        url=self.url,
        file_pointer=self.file_pointer,
    )
def h2z(text, ignore='', kana=True, ascii=False, digit=False):
    """Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana

    Parameters
    ----------
    text : str
        Half-width Katakana string.
    ignore : str
        Characters to be ignored in converting.
    kana : bool
        Either converting Kana or not.
    ascii : bool
        Either converting ascii or not.
    digit : bool
        Either converting digit or not.

    Return
    ------
    str
        Full-width Katakana string.

    Examples
    --------
    >>> print(jaconv.h2z('ティロフィナーレ'))
    ティロフィナーレ
    >>> print(jaconv.h2z('ティロフィナーレ', ignore='ィ'))
    ティロフィナーレ
    >>> print(jaconv.h2z('abcd', ascii=True))
    ABCD
    >>> print(jaconv.h2z('1234', digit=True))
    1234
    """
    def _conv_dakuten(text):
        """Convert Hankaku Dakuten Kana to Zenkaku Dakuten Kana
        """
        # Each half-width voiced kana is two code points (base + mark);
        # replace each pair with its single full-width equivalent.
        pairs = (
            ("ガ", "ガ"), ("ギ", "ギ"), ("グ", "グ"), ("ゲ", "ゲ"), ("ゴ", "ゴ"),
            ("ザ", "ザ"), ("ジ", "ジ"), ("ズ", "ズ"), ("ゼ", "ゼ"), ("ゾ", "ゾ"),
            ("ダ", "ダ"), ("ヂ", "ヂ"), ("ヅ", "ヅ"), ("デ", "デ"), ("ド", "ド"),
            ("バ", "バ"), ("ビ", "ビ"), ("ブ", "ブ"), ("ベ", "ベ"), ("ボ", "ボ"),
            ("パ", "パ"), ("ピ", "ピ"), ("プ", "プ"), ("ペ", "ペ"), ("ポ", "ポ"),
            ("ヴ", "ヴ"),
        )
        for hankaku, zenkaku in pairs:
            text = text.replace(hankaku, zenkaku)
        return text

    # Select the conversion table from the (ascii, digit, kana) flags.
    # Note: when neither ascii nor digit is requested, the kana table is
    # used regardless of the kana flag (matching the original behavior).
    h2z_map = {
        (True, True, True): H2Z_ALL,
        (True, True, False): H2Z_AD,
        (True, False, True): H2Z_AK,
        (True, False, False): H2Z_A,
        (False, True, True): H2Z_DK,
        (False, True, False): H2Z_D,
        (False, False, True): H2Z_K,
        (False, False, False): H2Z_K,
    }[(bool(ascii), bool(digit), bool(kana))]
    if kana:
        text = _conv_dakuten(text)
    if ignore:
        h2z_map = _exclude_ignorechar(ignore, h2z_map.copy())
    return _convert(text, h2z_map)
def get(ctx):
    """Get job.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Examples:

    \b
    ```bash
    $ polyaxon job --job=1 get
    ```

    \b
    ```bash
    $ polyaxon job --job=1 --project=project_name get
    ```
    """
    # Resolve user/project/job from the CLI context or the local cache.
    user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
    try:
        response = PolyaxonClient().job.get_job(user, project_name, _job)
        # Cache the response so subsequent commands can reuse it.
        cache.cache(config_manager=JobManager, response=response)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not get job `{}`.'.format(_job))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    get_job_details(response)
def is_prime(n):
    """Return True if n is prime, False otherwise.

    We use the Miller-Rabin test, as given in Menezes et al. p. 138.
    This test is not exact: there are composite values n for which
    it returns True.

    In testing the odd numbers from 10000001 to 19999999,
    about 66 composites got past the first test,
    5 got past the second test, and none got past the third.
    Since factors of 2, 3, 5, 7, and 11 were detected during
    preliminary screening, the number of numbers tested by
    Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
    = 4.57 million.
    """
    # (This is used to study the risk of false positives:)
    global miller_rabin_test_count
    miller_rabin_test_count = 0

    # Small n: answer by table lookup.
    if n <= smallprimes[-1]:
        return n in smallprimes

    # Cheap screen for factors of 2, 3, 5, 7, 11.
    if gcd(n, 2 * 3 * 5 * 7 * 11) != 1:
        return False

    # Choose a number of iterations sufficient to reduce the
    # probability of accepting a composite below 2**-80
    # (from Menezes et al. Table 4.4):
    t = 40
    n_bits = 1 + int(math.log(n, 2))
    for k, tt in ((100, 27), (150, 18), (200, 15), (250, 12),
                  (300, 9), (350, 8), (400, 7), (450, 6),
                  (550, 5), (650, 4), (850, 3), (1300, 2)):
        if n_bits < k:
            break
        t = tt

    # Write n - 1 as 2**s * r with r odd.
    s = 0
    r = n - 1
    while r % 2 == 0:
        s += 1
        r //= 2

    # Run the Miller-Rabin test t times, using the first t small primes
    # as bases. The built-in three-argument pow() performs modular
    # exponentiation in C, replacing the hand-rolled modular_exp helper.
    for i in range(t):
        a = smallprimes[i]
        y = pow(a, r, n)
        if y != 1 and y != n - 1:
            j = 1
            while j <= s - 1 and y != n - 1:
                y = pow(y, 2, n)
                if y == 1:
                    # Nontrivial square root of 1 found: definitely composite.
                    miller_rabin_test_count = i + 1
                    return False
                j += 1
            if y != n - 1:
                miller_rabin_test_count = i + 1
                return False
    return True
def expand_tile(units, axis):
    """
    Expand and tile tensor along given axis

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: axis along which expand and tile. Must be 1 or 2
    """
    assert axis in (1, 2)
    n_time_steps = K.int_shape(units)[1]
    # Tile factor: repeat the newly inserted singleton axis time_steps times.
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        # Insert the new singleton axis before the time dimension.
        expanded = Reshape(target_shape=((1,) + K.int_shape(units)[1:]))(units)
    else:
        # Insert the new singleton axis after the time dimension.
        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
    return K.tile(expanded, repetitions)
def _join_summary_file(data, summary_filename="msd_summary_file.h5"):
    """Get the trackinfo array by joining the taste profile to the track
    summary file.

    :param data: dataframe with a categorical 'track' column of track ids.
    :param summary_filename: path to the MSD summary HDF5 file.
    :return: object array of shape (n_tracks, 4) with
        (trackid, artist, release, title) strings per row.
    """
    # Create a lookup table of trackid -> position.
    track_lookup = dict((t.encode("utf8"), i) for i, t in enumerate(data['track'].cat.categories))
    # np.object was removed in NumPy 1.24; the builtin `object` is the
    # correct dtype for arrays of Python strings.
    track_info = np.empty(shape=(len(track_lookup), 4), dtype=object)
    # Open read-only and close the file when done (the original relied on
    # the default mode and leaked the file handle).
    with h5py.File(summary_filename, "r") as msd:
        # Join on trackid to the summary file to get the artist/album/songname.
        with tqdm.tqdm(total=len(track_info)) as progress:
            for song in msd['metadata']['songs']:
                trackid = song[17]
                if trackid in track_lookup:
                    pos = track_lookup[trackid]
                    track_info[pos] = [x.decode("utf8") for x in (trackid, song[9], song[14], song[18])]
                progress.update(1)
    return track_info
def get_code_indices(s: Union[str, 'ChainedBase']) -> Dict[int, str]:
    """ Retrieve a dict of {index: escape_code} for a given string.
    If no escape codes are found, an empty dict is returned.
    """
    indices = {}
    offset = 0
    for code in get_codes(s):
        # Index of this code within the still-unscanned part of s.
        local_index = s.index(code)
        absolute_index = offset + local_index
        indices[absolute_index] = code
        # Drop everything up to and including this code, so a repeated
        # code is located at its next occurrence, not the first.
        offset = absolute_index + len(code)
        s = s[local_index + len(code):]
    return indices
def decode(self, file_name):
    """
    Parse the filename, creating a FileTag from it.

    It will try both the new and the old conventions; if the filename does
    not conform to either of them, an empty FileTag is returned.

    :param file_name: filename to parse
    :return: a FileTag instance
    """
    # A bare "except:" would also swallow KeyboardInterrupt/SystemExit;
    # only real decoder errors should trigger the fallback.
    try:
        return self._filename_decoder_new.decode(file_name)
    except Exception:
        pass
    try:
        return self._filename_decoder_old.decode(file_name)
    except Exception:
        # Neither convention matched: return an empty tag.
        return FileTag(0, 0, '', '', '')
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name:
        Name of the route.

    :param address_prefix:
        The destination CIDR to which the route applies.

    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.

    :param route_table:
        The name of the existing route table which will contain the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing route, if any.
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in route:
        # Route exists: diff each managed property into ret['changes'].
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }

        # Case-insensitive comparison: Azure normalizes the hop-type casing.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }

        # The hop IP is only meaningful for VirtualAppliance hops.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret

    else:
        # Route does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Route {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    # Create or update the route with any extra kwargs plus auth.
    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)

    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def _create(archive, compression, cmd, format, verbosity, filenames):
    """Create an LZMA or XZ archive with the lzma Python module."""
    if len(filenames) > 1:
        raise util.PatoolError('multi-file compression not supported in Python lzma')
    try:
        with lzma.LZMAFile(archive, mode='wb', **_get_lzma_options(format, preset=9)) as lzmafile:
            with open(filenames[0], 'rb') as srcfile:
                # Stream the source file in fixed-size chunks.
                for chunk in iter(lambda: srcfile.read(READ_SIZE_BYTES), b''):
                    lzmafile.write(chunk)
    except Exception as err:
        msg = "error creating %s: %s" % (archive, err)
        raise util.PatoolError(msg)
    return None
def example_bigbeds():
    """
    Return a list of absolute paths to the example bigBed files found in
    the data directory.
    """
    directory = data_dir()
    return [
        os.path.abspath(os.path.join(directory, name))
        for name in os.listdir(directory)
        if os.path.splitext(name)[-1] == '.bigBed'
    ]
def iterable_source(iterable, target):
    """Convert an iterable into a stream of events.

    Args:
        iterable: A series of items which will be sent to the target one by one.
        target: The target coroutine or sink.

    Returns:
        An iterator over any remaining items (non-empty only when the
        target stops accepting items by raising StopIteration).
    """
    stream = iter(iterable)
    while True:
        try:
            item = next(stream)
        except StopIteration:
            # Source exhausted normally: nothing remains.
            return empty_iter()
        try:
            target.send(item)
        except StopIteration:
            # Target refused this item: hand back the unsent item plus
            # everything not yet consumed from the source.
            return prepend(item, stream)
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items. |
def send(vm, target, key='uuid'):
    '''
    Send a vm to a directory
    vm : string
        vm to be sent
    target : string
        target directory
    key : string [uuid|alias|hostname]
        value type of 'vm' parameter
    CLI Example:
    .. code-block:: bash
        salt '*' vmadm.send 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
        salt '*' vmadm.send vm=nacl target=/opt/backups key=alias
    '''
    ret = {}
    # Validate the lookup key before shelling out.
    if key not in ['uuid', 'alias', 'hostname']:
        ret['Error'] = 'Key must be either uuid, alias or hostname'
        return ret
    if not os.path.isdir(target):
        ret['Error'] = 'Target must be a directory or host'
        return ret
    # Resolve alias/hostname to the canonical uuid; lookup returns an
    # error dict on failure, which is passed straight back to the caller.
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    # vmadm send <uuid> [target]
    # python_shell is required because the output is redirected with '>'.
    cmd = 'vmadm send {uuid} > {target}'.format(
        uuid=vm,
        target=os.path.join(target, '{0}.vmdata'.format(vm))
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    # vmadm send does not include delegated ZFS datasets; dump each one
    # separately alongside the vmdata file.
    vmobj = get(vm)
    if 'datasets' not in vmobj:
        return True
    log.warning('one or more datasets detected, this is not supported!')
    log.warning('trying to zfs send datasets...')
    for dataset in vmobj['datasets']:
        # Use only the last path component for the backup file name.
        name = dataset.split('/')
        name = name[-1]
        cmd = 'zfs send {dataset} > {target}'.format(
            dataset=dataset,
            target=os.path.join(target, '{0}-{1}.zfsds'.format(vm, name))
        )
        res = __salt__['cmd.run_all'](cmd, python_shell=True)
        retcode = res['retcode']
        if retcode != 0:
            ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
            return ret
    # NOTE: returns True on success but a dict on failure, matching the
    # convention of the surrounding salt execution module.
    return True
vm : string
vm to be sent
target : string
target directory
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.send 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
salt '*' vmadm.send vm=nacl target=/opt/backups key=alias |
def urls(model,form_class=None,fields=None,redirect=None,object_list=None,fail_if_empty=True):
    """
    Returns URL patterns for creating, updating and deleting models. Supports lists and formsets as well

    model           Model class
    form_class      Form class for use in create, update and formset views (default is None)
    fields          Required if form_class is not provided
    redirect        Redirection URL for create, update and delete views
    object_list     Queryset for list and formset. If absent, these views are not created
    fail_if_empty   Raise ImproperlyConfigured exception in formset and list views when object_list is empty
    """
    if form_class is None and fields is None:
        raise ImproperlyConfigured("Must define either `form_class` or `fields`.")
    if object_list is None and redirect is None:
        raise ImproperlyConfigured("Must define `redirect` when `object_list` is missing.")
    prefix = model.__name__.lower()
    if redirect is None:
        redirect = reverse_lazy(prefix + '_list')
    # Raw strings for the regexes: '\d' in a plain string relies on a
    # deprecated escape and will break in future Python versions.
    urlpatterns = patterns('',
        # Create a new record
        url(r'^' + prefix + r'/create/',
            CreateView.as_view(model=model,form_class=form_class,fields=fields,success_url=redirect),
            name = prefix + '_create'
        ),
        # Update record 'pk'
        url(r'^' + prefix + r'/update/(?P<pk>\d+)/',
            UpdateView.as_view(model=model,form_class=form_class,fields=fields,success_url=redirect),
            name = prefix + '_update'
        ),
        # Delete record 'pk'
        url(r'^' + prefix + r'/delete/(?P<pk>\d+)/',
            DeleteView.as_view(model=model,success_url=redirect),
            name = prefix + '_delete'
        ),
    )
    # Compare against None (as above) instead of truth-testing: truth-testing
    # a queryset evaluates it at URLconf load time and silently skips these
    # views for a currently-empty queryset.
    if object_list is not None:
        urlpatterns += patterns('',
            # List records
            url(r'^' + prefix + r'/list/',
                ListView.as_view(model=model,object_list=object_list,fail_if_empty=fail_if_empty),
                name = prefix + '_list'
            ),
            # Edit records using a formset
            url(r'^' + prefix + r'/formset/',
                FormsetView.as_view(model=model,form_class=form_class,fields=fields,object_list=object_list,fail_if_empty=fail_if_empty),
                name = prefix + '_formset'
            ),
        )
    return urlpatterns
model Model class
form_class Form class for use in create, update and formset views (default is None)
fields Required if form_class is not provided
redirect Redirection URL for create, update and delete views
object_list Queryset for list and formset. If absent, these views are not created
fail_if_empty Raise ImproperlyConfigured exception in formset and list views when object_list is empty |
def _get_fuzzy_padding(self, lean):
    """
    Return a relativedelta of padding implied by the uncertainty markers
    ('?', '~') attached to the individual date components.

    This is not a perfect interpretation as fuzziness is introduced for
    redundant uncertainty modifiers e.g. (2006~)~ will get two sets of
    fuzziness.
    """
    # NOTE(review): positional 0 is treated as a falsy dt1 by relativedelta,
    # so this is equivalent to relativedelta() (a zero delta) — confirm intent.
    result = relativedelta(0)
    # Per-component markers: each contributes padding at its own precision,
    # scaled by the marker's multiplier ('?' vs '~' weightings).
    if self.year_ua:
        result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
    if self.month_ua:
        result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
    if self.day_ua:
        result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
    # Combined markers spanning two components pad both of them.
    if self.year_month_ua:
        result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
        result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
    if self.month_day_ua:
        result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
        result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
    if self.season_ua:
        result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
    # A marker applying to the whole date pads every component down to the
    # date's precision (day implies month and year, etc.).
    if self.all_ua:
        multiplier = self.all_ua._get_multiplier()
        if self.precision == PRECISION_DAY:
            result += multiplier * appsettings.PADDING_DAY_PRECISION
            result += multiplier * appsettings.PADDING_MONTH_PRECISION
            result += multiplier * appsettings.PADDING_YEAR_PRECISION
        elif self.precision == PRECISION_MONTH:
            result += multiplier * appsettings.PADDING_MONTH_PRECISION
            result += multiplier * appsettings.PADDING_YEAR_PRECISION
        elif self.precision == PRECISION_YEAR:
            result += multiplier * appsettings.PADDING_YEAR_PRECISION
    return result
redundant uncertainty modifiers e.g. (2006~)~ will get two sets of
fuzziness. |
def getExpectedValue(distribution):
    """
    Return E[X], the expectation of the given distribution X, computed as
    the probability-weighted sum over its possible values.
    """
    values = np.asarray(distribution.possibleValues)
    weights = distribution.pmf(values)
    return (values * weights).sum()
def migrate(config):
    """Perform a migration according to config.

    Starts a maintenance web page in a daemon thread, acquires a
    database-level application lock (so concurrent deployments serialize),
    then runs the parsed migration through a Runner.

    :param config: The configuration to be applied
    :type config: Config
    """
    # Maintenance page served while the migration runs; daemonized so it
    # dies with the process.
    webapp = WebApp(config.web_host, config.web_port,
                    custom_maintenance_file=config.web_custom_html)
    webserver = WebServer(webapp)
    webserver.daemon = True
    webserver.start()
    migration_parser = YamlParser.parse_from_file(config.migration_file)
    migration = migration_parser.parse()
    database = Database(config)
    # The lock connection stays open for the whole run: the application
    # lock lives on this connection.
    with database.connect() as lock_connection:
        application_lock = ApplicationLock(lock_connection)
        application_lock.start()
        # Busy-wait until the lock thread reports acquisition.
        while not application_lock.acquired:
            time.sleep(0.5)
        else:
            # NOTE(review): this `else` always runs after the loop exits
            # normally; it is equivalent to plain fall-through code here.
            if application_lock.replica:
                # when a replica could finally acquire a lock, it
                # means that the concurrent process has finished the
                # migration or that it failed to run it.
                # In both cases after the lock is released, this process will
                # verify if it has still to do something (if the other process
                # failed mainly).
                application_lock.stop = True
                application_lock.join()
            # we are not in the replica or the lock is released: go on for the
            # migration
            try:
                table = MigrationTable(database)
                runner = Runner(config, migration, database, table)
                runner.perform()
            finally:
                # Always release the lock thread, even if the run fails.
                application_lock.stop = True
                application_lock.join()
:param config: The configuration to be applied
:type config: Config |
def handle_termination(cls, pid, is_cancel=True):
    '''
    Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.

    Kills the whole process tree: every (recursive) child first, then the
    main process.  If the tree cannot be inspected at all, falls back to
    killing just the given pid.

    :param pid: the process id of the running the job.
    :param is_cancel: flag showing whether this termination is caused by
                      instance's cancel_flag.
    '''
    try:
        parent = psutil.Process(pid=pid)
        for child in parent.children(recursive=True):
            try:
                os.kill(child.pid, signal.SIGKILL)
            except (TypeError, OSError):
                # Child may already be gone; ignore and keep going.
                pass
        os.kill(parent.pid, signal.SIGKILL)
    except (TypeError, psutil.Error, OSError):
        # Could not walk the tree (process vanished, permission, bad pid):
        # best-effort kill of the main pid only.
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError:
            pass
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag. |
def recover_and_supervise(recovery_file):
    """Retrieve monitor data from recovery_file and resume monitoring.

    :param recovery_file: path to a JSON file with keys ``monitor_data``,
        ``dependencies`` and ``args``.
    :return: 1 when the file cannot be read or parsed; otherwise hands off
        to ``supervise_until_complete`` (which returns None).
    """
    try:
        logging.info("Attempting to recover Supervisor data from " + recovery_file)
        with open(recovery_file) as rf:
            recovery_data = json.load(rf)
        monitor_data = recovery_data['monitor_data']
        dependencies = recovery_data['dependencies']
        args = recovery_data['args']
    except (OSError, ValueError, KeyError, TypeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed. json.JSONDecodeError is a ValueError; TypeError
        # covers a JSON document that is not an object. logging.exception
        # records the traceback the original discarded.
        logging.exception("Could not recover monitor data, exiting...")
        return 1
    logging.info("Data successfully loaded, resuming Supervisor")
    supervise_until_complete(monitor_data, dependencies, args, recovery_file)
def qrcode(self, data, **kwargs):
    """
    Render given ``data`` as `QRCode <http://www.qrcode.com/en/>`_.

    Keyword arguments are validated first and then passed through
    unchanged to the backend-specific implementation.
    """
    # Raises on any invalid QRCode option before touching the device.
    barcode.validate_qrcode_args(**kwargs)
    return self._qrcode_impl(data, **kwargs)
async def pack_message(wallet_handle: int,
                       message: str,
                       recipient_verkeys: list,
                       sender_verkey: Optional[str]) -> bytes:
    """
    Packs a message by encrypting the message and serializes it in a JWE-like format (Experimental)

    Note to use DID keys with this function you can call did.key_for_did to get key id (verkey)
    for specific DID.

    #Params
    command_handle: command handle to map callback to user context.
    wallet_handle: wallet handler (created by open_wallet)
    message: the message being sent as a string. If it's JSON formatted it should be converted to a string
    recipient_verkeys: a list of Strings which are recipient verkeys
    sender_verkey: the sender's verkey as a string. -> When None is passed in this parameter, anoncrypt mode is used

    returns an Agent Wire Message format as a byte array. See HIPE 0028 for detailed formats
    """
    logger = logging.getLogger(__name__)
    logger.debug("pack_message: >>> wallet_handle: %r, message: %r, recipient_verkeys: %r, sender_verkey: %r",
                 wallet_handle,
                 message,
                 recipient_verkeys,
                 sender_verkey)

    # Copies the C byte buffer returned by libindy into Python bytes before
    # the native memory is released.
    def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
        return bytes(arr_ptr[:arr_len]),

    # The ctypes callback is created once and cached on the function object
    # so it is not garbage-collected between calls.
    if not hasattr(pack_message, "cb"):
        logger.debug("pack_message: Creating callback")
        pack_message.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)

    # Marshal all arguments into C-compatible types.
    c_wallet_handle = c_int32(wallet_handle)
    msg_bytes = message.encode("utf-8")
    c_msg_len = c_uint32(len(msg_bytes))
    c_recipient_verkeys = c_char_p(json.dumps(recipient_verkeys).encode('utf-8'))
    # A null sender verkey selects anoncrypt mode in libindy.
    c_sender_vk = c_char_p(sender_verkey.encode('utf-8')) if sender_verkey is not None else None

    res = await do_call('indy_pack_message',
                        c_wallet_handle,
                        msg_bytes,
                        c_msg_len,
                        c_recipient_verkeys,
                        c_sender_vk,
                        pack_message.cb)

    logger.debug("pack_message: <<< res: %r", res)
    return res
Note to use DID keys with this function you can call did.key_for_did to get key id (verkey)
for specific DID.
#Params
command_handle: command handle to map callback to user context.
wallet_handle: wallet handler (created by open_wallet)
message: the message being sent as a string. If it's JSON formatted it should be converted to a string
recipient_verkeys: a list of Strings which are recipient verkeys
sender_verkey: the sender's verkey as a string. -> When None is passed in this parameter, anoncrypt mode is used
returns an Agent Wire Message format as a byte array. See HIPE 0028 for detailed formats |
def from_series(self, series, add_index_column=True):
    """
    Set tabular attributes to the writer from :py:class:`pandas.Series`.

    Following attributes are set by the method:
    - :py:attr:`~.headers`
    - :py:attr:`~.value_matrix`
    - :py:attr:`~.type_hints`

    Args:
        series(pandas.Series):
            Input pandas.Series object.
        add_index_column(bool, optional):
            If |True|, add a column of ``index`` of the ``series``.
            Defaults to |True|.
    """
    # Column header defaults to "value" when the series is unnamed.
    self.headers = [series.name] if series.name else ["value"]
    self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]

    if not add_index_column:
        self.value_matrix = [[value] for value in series.tolist()]
        return

    # Prepend an unnamed/untyped index column.
    self.headers = [""] + self.headers
    if self.type_hints:
        self.type_hints = [None] + self.type_hints
    self.value_matrix = [
        [idx, value]
        for idx, value in zip(series.index.tolist(), series.tolist())
    ]
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
Defaults to |True|. |
def extended_stats(G, connectivity=False, anc=False, ecc=False, bc=False, cc=False):
    """
    Calculate extended topological stats and metrics for a graph.

    Many of these algorithms have an inherently high time complexity. Global
    topological analysis of large complex networks is extremely time consuming
    and may exhaust computer memory. Consider using function arguments to not
    run metrics that require computation of a full matrix of paths if they
    will not be needed.

    Parameters
    ----------
    G : networkx multidigraph
    connectivity : bool
        if True, calculate node and edge connectivity
    anc : bool
        if True, calculate average node connectivity
    ecc : bool
        if True, calculate shortest paths, eccentricity, and topological metrics
        that use eccentricity
    bc : bool
        if True, calculate node betweenness centrality
    cc : bool
        if True, calculate node closeness centrality

    Returns
    -------
    stats : dict
        dictionary of network measures containing the following elements (some
        only calculated/returned optionally, based on passed parameters):
          - avg_neighbor_degree
          - avg_neighbor_degree_avg
          - avg_weighted_neighbor_degree
          - avg_weighted_neighbor_degree_avg
          - degree_centrality
          - degree_centrality_avg
          - clustering_coefficient
          - clustering_coefficient_avg
          - clustering_coefficient_weighted
          - clustering_coefficient_weighted_avg
          - pagerank
          - pagerank_max_node
          - pagerank_max
          - pagerank_min_node
          - pagerank_min
          - node_connectivity
          - node_connectivity_avg
          - edge_connectivity
          - eccentricity
          - diameter
          - radius
          - center
          - periphery
          - closeness_centrality
          - closeness_centrality_avg
          - betweenness_centrality
          - betweenness_centrality_avg
    """
    stats = {}
    full_start_time = time.time()
    # create a DiGraph from the MultiDiGraph, for those metrics that require it
    G_dir = nx.DiGraph(G)
    # create an undirected Graph from the MultiDiGraph, for those metrics that
    # require it
    G_undir = nx.Graph(G)
    # get the largest strongly connected component, for those metrics that
    # require strongly connected graphs
    G_strong = get_largest_component(G, strongly=True)
    # average degree of the neighborhood of each node, and average for the graph
    avg_neighbor_degree = nx.average_neighbor_degree(G)
    stats['avg_neighbor_degree'] = avg_neighbor_degree
    stats['avg_neighbor_degree_avg'] = sum(avg_neighbor_degree.values())/len(avg_neighbor_degree)
    # average weighted degree of the neighborhood of each node, and average for
    # the graph (edge weight = 'length' throughout this function)
    avg_weighted_neighbor_degree = nx.average_neighbor_degree(G, weight='length')
    stats['avg_weighted_neighbor_degree'] = avg_weighted_neighbor_degree
    stats['avg_weighted_neighbor_degree_avg'] = sum(avg_weighted_neighbor_degree.values())/len(avg_weighted_neighbor_degree)
    # degree centrality for a node is the fraction of nodes it is connected to
    degree_centrality = nx.degree_centrality(G)
    stats['degree_centrality'] = degree_centrality
    stats['degree_centrality_avg'] = sum(degree_centrality.values())/len(degree_centrality)
    # calculate clustering coefficient for the nodes
    stats['clustering_coefficient'] = nx.clustering(G_undir)
    # average clustering coefficient for the graph
    stats['clustering_coefficient_avg'] = nx.average_clustering(G_undir)
    # calculate weighted clustering coefficient for the nodes
    stats['clustering_coefficient_weighted'] = nx.clustering(G_undir, weight='length')
    # average clustering coefficient (weighted) for the graph
    stats['clustering_coefficient_weighted_avg'] = nx.average_clustering(G_undir, weight='length')
    # pagerank: a ranking of the nodes in the graph based on the structure of
    # the incoming links
    pagerank = nx.pagerank(G_dir, weight='length')
    stats['pagerank'] = pagerank
    # node with the highest page rank, and its value
    pagerank_max_node = max(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_max_node'] = pagerank_max_node
    stats['pagerank_max'] = pagerank[pagerank_max_node]
    # node with the lowest page rank, and its value
    pagerank_min_node = min(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_min_node'] = pagerank_min_node
    stats['pagerank_min'] = pagerank[pagerank_min_node]
    # if True, calculate node and edge connectivity
    # NOTE: connectivity and eccentricity metrics run on G_strong (largest
    # strongly connected component); closeness/betweenness run on full G.
    if connectivity:
        start_time = time.time()
        # node connectivity is the minimum number of nodes that must be removed
        # to disconnect G or render it trivial
        stats['node_connectivity'] = nx.node_connectivity(G_strong)
        # edge connectivity is equal to the minimum number of edges that must be
        # removed to disconnect G or render it trivial
        stats['edge_connectivity'] = nx.edge_connectivity(G_strong)
        log('Calculated node and edge connectivity in {:,.2f} seconds'.format(time.time() - start_time))
    # if True, calculate average node connectivity
    if anc:
        # mean number of internally node-disjoint paths between each pair of
        # nodes in G, i.e., the expected number of nodes that must be removed to
        # disconnect a randomly selected pair of non-adjacent nodes
        # (all-pairs computation: very slow on large graphs)
        start_time = time.time()
        stats['node_connectivity_avg'] = nx.average_node_connectivity(G)
        log('Calculated average node connectivity in {:,.2f} seconds'.format(time.time() - start_time))
    # if True, calculate shortest paths, eccentricity, and topological metrics
    # that use eccentricity
    if ecc:
        # precompute shortest paths between all nodes for eccentricity-based
        # stats (Dijkstra, weighted by edge 'length')
        start_time = time.time()
        sp = {source:dict(nx.single_source_dijkstra_path_length(G_strong, source, weight='length')) for source in G_strong.nodes()}
        log('Calculated shortest path lengths in {:,.2f} seconds'.format(time.time() - start_time))
        # eccentricity of a node v is the maximum distance from v to all other
        # nodes in G
        eccentricity = nx.eccentricity(G_strong, sp=sp)
        stats['eccentricity'] = eccentricity
        # diameter is the maximum eccentricity
        diameter = nx.diameter(G_strong, e=eccentricity)
        stats['diameter'] = diameter
        # radius is the minimum eccentricity
        radius = nx.radius(G_strong, e=eccentricity)
        stats['radius'] = radius
        # center is the set of nodes with eccentricity equal to radius
        center = nx.center(G_strong, e=eccentricity)
        stats['center'] = center
        # periphery is the set of nodes with eccentricity equal to the diameter
        periphery = nx.periphery(G_strong, e=eccentricity)
        stats['periphery'] = periphery
    # if True, calculate node closeness centrality
    if cc:
        # closeness centrality of a node is the reciprocal of the sum of the
        # shortest path distances from u to all other nodes
        start_time = time.time()
        closeness_centrality = nx.closeness_centrality(G, distance='length')
        stats['closeness_centrality'] = closeness_centrality
        stats['closeness_centrality_avg'] = sum(closeness_centrality.values())/len(closeness_centrality)
        log('Calculated closeness centrality in {:,.2f} seconds'.format(time.time() - start_time))
    # if True, calculate node betweenness centrality
    if bc:
        # betweenness centrality of a node is the sum of the fraction of
        # all-pairs shortest paths that pass through node
        start_time = time.time()
        betweenness_centrality = nx.betweenness_centrality(G, weight='length')
        stats['betweenness_centrality'] = betweenness_centrality
        stats['betweenness_centrality_avg'] = sum(betweenness_centrality.values())/len(betweenness_centrality)
        log('Calculated betweenness centrality in {:,.2f} seconds'.format(time.time() - start_time))
    log('Calculated extended stats in {:,.2f} seconds'.format(time.time()-full_start_time))
    return stats
Many of these algorithms have an inherently high time complexity. Global
topological analysis of large complex networks is extremely time consuming
and may exhaust computer memory. Consider using function arguments to not
run metrics that require computation of a full matrix of paths if they
will not be needed.
Parameters
----------
G : networkx multidigraph
connectivity : bool
if True, calculate node and edge connectivity
anc : bool
if True, calculate average node connectivity
ecc : bool
if True, calculate shortest paths, eccentricity, and topological metrics
that use eccentricity
bc : bool
if True, calculate node betweenness centrality
cc : bool
if True, calculate node closeness centrality
Returns
-------
stats : dict
dictionary of network measures containing the following elements (some
only calculated/returned optionally, based on passed parameters):
- avg_neighbor_degree
- avg_neighbor_degree_avg
- avg_weighted_neighbor_degree
- avg_weighted_neighbor_degree_avg
- degree_centrality
- degree_centrality_avg
- clustering_coefficient
- clustering_coefficient_avg
- clustering_coefficient_weighted
- clustering_coefficient_weighted_avg
- pagerank
- pagerank_max_node
- pagerank_max
- pagerank_min_node
- pagerank_min
- node_connectivity
- node_connectivity_avg
- edge_connectivity
- eccentricity
- diameter
- radius
- center
- periphery
- closeness_centrality
- closeness_centrality_avg
- betweenness_centrality
- betweenness_centrality_avg |
def get(self, entity_id: EntityId, load: bool = False) -> Entity:
    """Get a Wikidata entity by its :class:`~.entity.EntityId`.

    Entities are memoized in ``self.identity_map`` so repeated lookups of
    the same id return the same object.

    :param entity_id: The :attr:`~.entity.Entity.id` of
                      the :class:`~.entity.Entity` to find.
    :type eneity_id: :class:`~.entity.EntityId`
    :param load: Eager loading on :const:`True`.
                 Lazy loading (:const:`False`) by default.
    :type load: :class:`bool`
    :return: The found entity.
    :rtype: :class:`~.entity.Entity`

    .. versionadded:: 0.3.0
       The ``load`` option.
    """
    if entity_id in self.identity_map:
        entity = self.identity_map[entity_id]
    else:
        entity = Entity(entity_id, self)
        self.identity_map[entity_id] = entity
    if load:
        entity.load()
    return entity
:param entity_id: The :attr:`~.entity.Entity.id` of
the :class:`~.entity.Entity` to find.
:type eneity_id: :class:`~.entity.EntityId`
:param load: Eager loading on :const:`True`.
Lazy loading (:const:`False`) by default.
:type load: :class:`bool`
:return: The found entity.
:rtype: :class:`~.entity.Entity`
.. versionadded:: 0.3.0
The ``load`` option. |
def connect(self):
    """ Simple connect

    Opens a Telnet session to ``self.host:self.port``, logs in and runs
    an initial :meth:`update`.  On DNS-resolution failure the connection
    is marked dead (``self.telnet = None``) and an error is logged.
    """
    try:
        self.telnet = Telnet(self.host, self.port)
        # Give the remote side a moment before reading/sending.
        time.sleep(1)
        self.get()
        # NOTE(review): hard-coded admin/admin credentials — presumably
        # the device's factory defaults; confirm.
        self.get('login admin admin')
        self.update()
    except socket.gaierror:
        # Only name-resolution errors are handled here; connection refusals
        # and timeouts propagate to the caller.
        self.telnet = None
        LOGGER.error("Cannot connect to %s (%d)",
                     self.host, self.retries)
def diff_result_to_cell(item):
    '''Extract the notebook cell from one diff.diff result entry and record
    the diff state in its metadata; for modified cells the pre-change cell
    is also stashed under metadata['original'].'''
    state = item['state']
    if state != 'modified':
        cell = item['value'].data
        cell['metadata']['state'] = state
        return cell
    cell = item['modifiedvalue'].data
    cell['metadata']['state'] = state
    cell['metadata']['original'] = item['originalvalue'].data
    return cell
but we want to extract the cell and change its metadata. |
def _get_node_text(self, goid, goobj):
"""Return a string to be printed in a GO term box."""
txt = []
# Header line: "GO:0036464 L04 D06"
txt.append(self.pltvars.fmthdr.format(
GO=goobj.id.replace("GO:", "GO"),
level=goobj.level,
depth=goobj.depth))
# GO name line: "cytoplamic ribonucleoprotein"
name = goobj.name.replace(",", "\n")
txt.append(name)
# study info line: "24 genes"
study_txt = self._get_study_txt(goid)
if study_txt is not None:
txt.append(study_txt)
# return text string
return "\n".join(txt) | Return a string to be printed in a GO term box. |
def write_to(self, f):
    """Serialize this header block to *f* in the format specified by WARC:
    the version line, one ``Name: value`` line per header, and a blank
    line terminator — all CRLF-delimited.
    """
    f.write(self.version + "\r\n")
    for raw_name, value in self.items():
        # Title-case the name, then restore the canonical WARC capitalizations.
        name = raw_name.title()
        for plain, canonical in (("Warc-", "WARC-"), ("-Ip-", "-IP-"),
                                 ("-Id", "-ID"), ("-Uri", "-URI")):
            name = name.replace(plain, canonical)
        f.write(name + ": " + value + "\r\n")
    # Header ends with an extra CRLF
    f.write("\r\n")
def list_all_products(cls, **kwargs):
    """List Products

    Return a list of Products
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_products(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Product]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper returns the request thread in async mode and the data in
    # sync mode; either way its return value is passed straight through.
    return cls._list_all_products_with_http_info(**kwargs)
Return a list of Products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_products(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Product]
If the method is called asynchronously,
returns the request thread. |
def map(self, func):
    """
    Process all data with given function.

    The scheme of function should be x,y -> x,y.  Each non-empty split is
    replaced by an eagerly evaluated list: under Python 3 the previous
    ``map(...)`` call produced a one-shot iterator that was exhausted after
    a single pass and always tested truthy, silently breaking repeated
    iteration over the splits.
    """
    if self._train_set:
        self._train_set = [func(item) for item in self._train_set]
    if self._valid_set:
        self._valid_set = [func(item) for item in self._valid_set]
    if self._test_set:
        self._test_set = [func(item) for item in self._test_set]
The scheme of function should be x,y -> x,y. |
def add_entry(self, row):
    """Parse a VCF row into a VCFEntry, index it by (chrom, pos) within
    this VCFFile, and return the parsed entry."""
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    self.entries[(entry.chrom, entry.pos)] = entry
    return entry
return the VCFEntry as well. |
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, one line/scatter per
    ISO week, optionally min-max renormalized per week.

    It is assumed that the variable `days_through_week` exists; returns
    False (after logging an error) when it does not.
    """
    if "days_through_week" not in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # One sub-frame per calendar week of the index.
    weeks = [frame for _, frame in df.groupby(df.index.week)]
    scaler = MinMaxScaler()
    plt.ylabel(variable)
    for week in weeks:
        # Rescale each week independently to [0, 1] when requested.
        values = scaler.fit_transform(week[[variable]]) if renormalize else week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle=linestyle, linewidth=linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s=s)
    # Tick at the middle of each day, labeled Monday..Sunday.
    plt.xticks(
        [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
It is assumed that the variable `days_through_week` exists. |
def fetch(self):
    """
    Fetch a AddOnResultInstance

    :returns: Fetched AddOnResultInstance
    :rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
    """
    # GET the resource with no query parameters, then wrap the payload.
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=values.of({}),
    )
    solution = self._solution
    return AddOnResultInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        reference_sid=solution['reference_sid'],
        sid=solution['sid'],
    )
:returns: Fetched AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance |
def cancel(self):
    """Cancel the observer.

    No further notifications will be passed on; every waiting consumer is
    woken by pushing the sentinel onto its queue.
    """
    LOG.debug('cancelling %s', self)
    self._cancelled = True
    # Not strictly necessary, but may release references held by callbacks.
    self.clear_callbacks()
    # Drain the waitables queue, waking each waiter with the sentinel.
    while True:
        try:
            waiter = self._waitables.get_nowait()
        except queue.Empty:
            break
        waiter.put_nowait(self.sentinel)
No more notifications will be passed on |
def ensure_sphinx_astropy_installed():
    """
    Make sure that sphinx-astropy is available, installing it temporarily if not.

    This returns the available version of sphinx-astropy as well as any
    paths that should be added to sys.path for sphinx-astropy to be available.
    """
    # We've split out the Sphinx part of astropy-helpers into sphinx-astropy
    # but we want it to be auto-installed seamlessly for anyone using
    # build_docs. We check if it's already installed, and if not, we install
    # it to a local .eggs directory and add the eggs to the path (these
    # have to each be added to the path, we can't add them by simply adding
    # .eggs to the path)
    sys_path_inserts = []
    sphinx_astropy_version = None
    try:
        # Fast path: already importable in the current environment.
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:
        from setuptools import Distribution
        dist = Distribution()
        # Numpydoc 0.9.0 requires sphinx 1.6+, we can limit the version here
        # until we also makes our minimum required version Sphinx 1.6
        if SPHINX_LT_16:
            dist.fetch_build_eggs('numpydoc<0.9')
        # This egg build doesn't respect python_requires, not aware of
        # pre-releases. We know that mpl 3.1+ requires Python 3.6+, so this
        # ugly workaround takes care of it until there is a solution for
        # https://github.com/astropy/astropy-helpers/issues/462
        if LooseVersion(sys.version) < LooseVersion('3.6'):
            dist.fetch_build_eggs('matplotlib<3.1')
        eggs = dist.fetch_build_eggs('sphinx-astropy')
        # Find out the version of sphinx-astropy if possible. For some old
        # setuptools version, eggs will be None even if sphinx-astropy was
        # successfully installed.
        if eggs is not None:
            for egg in eggs:
                if egg.project_name == 'sphinx-astropy':
                    sphinx_astropy_version = egg.parsed_version.public
                    break
        # Each egg must be inserted on sys.path individually; adding the
        # .eggs directory itself would not make the packages importable.
        eggs_path = os.path.abspath('.eggs')
        for egg in glob.glob(os.path.join(eggs_path, '*.egg')):
            sys_path_inserts.append(egg)
    return sphinx_astropy_version, sys_path_inserts
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available. |
def _delete_extraneous_files(self):
    # type: (Uploader) -> None
    """Delete extraneous files on the remote
    :param Uploader self: this
    """
    # No-op unless the upload spec explicitly opted into destination cleanup.
    if not self._spec.options.delete_extraneous_destination:
        return
    # list blobs for all destinations
    # `checked` de-duplicates destinations so each storage-account /
    # endpoint / destination-path triple is only scanned once.
    checked = set()
    deleted = 0
    for sa, container, vpath, dpath in self._get_destination_paths():
        key = ';'.join((sa.name, sa.endpoint, str(dpath)))
        if key in checked:
            continue
        logger.debug(
            'attempting to delete extraneous blobs/files from: {}'.format(
                key))
        if (self._spec.options.mode ==
                blobxfer.models.azure.StorageModes.File):
            files = blobxfer.operations.azure.file.list_all_files(
                sa.file_client, container)
            for file in files:
                # Only consider remote entries under the virtual path root;
                # relative_to raises ValueError for anything outside it.
                try:
                    pathlib.Path(file).relative_to(vpath)
                except ValueError:
                    continue
                id = blobxfer.operations.upload.Uploader.\
                    create_destination_id(sa.file_client, container, file)
                # Entries uploaded in this session are tracked in
                # _delete_exclude and must not be removed.
                if id not in self._delete_exclude:
                    if self._general_options.dry_run:
                        logger.info('[DRY RUN] deleting file: {}'.format(
                            file))
                    else:
                        if self._general_options.verbose:
                            logger.debug('deleting file: {}'.format(file))
                        blobxfer.operations.azure.file.delete_file(
                            sa.file_client, container, file)
                        # NOTE: dry-run deletions are not counted.
                        deleted += 1
        else:
            # All non-File modes are listed through the blob client.
            blobs = blobxfer.operations.azure.blob.list_all_blobs(
                sa.block_blob_client, container)
            for blob in blobs:
                try:
                    pathlib.Path(blob.name).relative_to(vpath)
                except ValueError:
                    continue
                id = blobxfer.operations.upload.Uploader.\
                    create_destination_id(
                        sa.block_blob_client, container, blob.name)
                if id not in self._delete_exclude:
                    if self._general_options.dry_run:
                        logger.info('[DRY RUN] deleting blob: {}'.format(
                            blob.name))
                    else:
                        if self._general_options.verbose:
                            logger.debug('deleting blob: {}'.format(
                                blob.name))
                        blobxfer.operations.azure.blob.delete_blob(
                            sa.block_blob_client, container, blob.name)
                        deleted += 1
        checked.add(key)
    logger.info('deleted {} extraneous blobs/files'.format(deleted)) | Delete extraneous files on the remote
:param Uploader self: this |
def get_instance_field(self, field_name):
    """
    Resolve ``field_name`` to an instance field, with dynamic-field fallback.

    If the normal lookup fails with AttributeError, the name is assumed to
    refer to a dynamic field: a copy of the matching dynamic field is
    created under the requested name and attached to this instance.
    """
    try:
        return super(ModelWithDynamicFieldMixin, self).get_instance_field(field_name)
    except AttributeError:
        # "has_field" reported True yet getattr raised: this must be a
        # dynamic field, so materialize it on the instance.
        model_field = self._get_dynamic_field_for(field_name)   # model-bound
        instance_field = self.get_field(model_field.name)       # instance-bound
        return self._add_dynamic_field_to_instance(instance_field, field_name)
check if it can be a dynamic field and in this case, create a copy with
the given name and associate it to the instance. |
def to_dict(self):
    """Converts this embed object into a dict."""
    # Gather the raw slot-backed attributes, dropping the leading underscore
    # from each key.
    result = {}
    for slot in self.__slots__:
        if slot[0] == '_' and hasattr(self, slot):
            result[slot[1:]] = getattr(self, slot)
    # Unwrap the colour convenience wrapper into its integer value.
    colour = result.pop('colour', None)
    if colour:
        result['color'] = colour.value
    # Serialize the timestamp wrapper to an ISO-8601 string.
    timestamp = result.pop('timestamp', None)
    if timestamp:
        result['timestamp'] = timestamp.isoformat()
    # Plain (non-underscore) attributes are copied only when truthy.
    for attr in ('type', 'description', 'url', 'title'):
        value = getattr(self, attr)
        if value:
            result[attr] = value
    return result
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
    r"""Register `tensor` to summary report as audio
    Args:
      tensor: A `Tensor` to log as audio
      sample_rate : An int. Sample rate to report. Default is 16000.
      prefix: A `string`. A prefix to display in the tensor board web UI.
      name: A `string`. A name to display in the tensor board web UI.
    Returns:
      None
    """
    # Build the display name: "<prefix>/<name or derived tensor name>".
    scope = '' if prefix is None else prefix + '/'
    display_name = scope + (_pretty_name(tensor) if name is None else name)
    # Skip registration inside reused variable scopes to avoid duplicates.
    if not tf.get_variable_scope().reuse:
        tf.summary.audio(display_name + '-au', tensor, sample_rate)
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None |
def _proc_uri(self, request, result):
"""
Process the URI rules for the request. Both the desired API
version and desired content type can be determined from those
rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
# First, determine the version based on the URI prefix
for prefix, version in self.uris:
if (request.path_info == prefix or
request.path_info.startswith(prefix + '/')):
result.set_version(version)
# Update the request particulars
request.script_name += prefix
request.path_info = request.path_info[len(prefix):]
if not request.path_info:
request.path_info = '/'
break
# Next, determine the content type based on the URI suffix
for format, ctype in self.formats.items():
if request.path_info.endswith(format):
result.set_ctype(ctype)
# Update the request particulars
request.path_info = request.path_info[:-len(format)]
break | Process the URI rules for the request. Both the desired API
version and desired content type can be determined from those
rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. |
def with_continuations(**c):
    """
    A decorator for defining tail-call optimized functions.

    Each keyword argument names an extra continuation passed to the wrapped
    function; the function itself is always supplied as ``self``.

    Example
    -------
    @with_continuations()
    def factorial(n, k, self=None):
        return self(n-1, k*n) if n > 1 else k
    @with_continuations()
    def identity(x, self=None):
        return x
    @with_continuations(out=identity)
    def factorial2(n, k, self=None, out=None):
        return self(n-1, k*n) if n > 1 else out(k)
    print(factorial(7,1))
    print(factorial2(7,1))
    """
    # Split the keyword mapping into parallel tuples of names and defaults.
    names, defaults = (zip(*c.items()) if c else ((), ()))

    def decorate(f):
        def bind(kself, *conts):
            def step(*args):
                return f(*args, self=kself, **dict(zip(names, conts)))
            return step
        # Wrap in the continuation combinator and seed it with the defaults.
        return C(bind)(*defaults)

    return decorate
Example
-------
@with_continuations()
def factorial(n, k, self=None):
return self(n-1, k*n) if n > 1 else k
@with_continuations()
def identity(x, self=None):
return x
@with_continuations(out=identity)
def factorial2(n, k, self=None, out=None):
return self(n-1, k*n) if n > 1 else out(k)
print(factorial(7,1))
print(factorial2(7,1)) |
def f_lock_parameters(self):
    """Locks all non-empty parameters"""
    # Empty parameters stay unlocked so they can still be filled in later.
    for param in self._parameters.values():
        if param.f_is_empty():
            continue
        param.f_lock()
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
    """
    Generate the RSS feed.
    Args:
        path (str): Where to save the RSS file. Make sure that your jinja
        templates refer to the same path using <link>.
        only_excerpt (bool): If True (the default), don't include the full
        body of posts in the RSS. Instead, include the first paragraph and
        a "read more" link to your website.
        https (bool): If True, links inside the RSS with relative scheme (e.g.
        //example.com/something) will be set to HTTPS. If False (the
        default), they will be set to plain HTTP.
    """
    # Resolve the output location first, then build and write the feed.
    target = self._get_dist_path(path)
    rss = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
    rss.rss_file(target)
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP. |
def instance_present(name, instance_name=None, instance_id=None, image_id=None,
                     image_name=None, tags=None, key_name=None,
                     security_groups=None, user_data=None, instance_type=None,
                     placement=None, kernel_id=None, ramdisk_id=None,
                     vpc_id=None, vpc_name=None, monitoring_enabled=None,
                     subnet_id=None, subnet_name=None, private_ip_address=None,
                     block_device_map=None, disable_api_termination=None,
                     instance_initiated_shutdown_behavior=None,
                     placement_group=None, client_token=None,
                     security_group_ids=None, security_group_names=None,
                     additional_info=None, tenancy=None,
                     instance_profile_arn=None, instance_profile_name=None,
                     ebs_optimized=None, network_interfaces=None,
                     network_interface_name=None,
                     network_interface_id=None,
                     attributes=None, target_state=None, public_ip=None,
                     allocation_id=None, allocate_eip=False, region=None,
                     key=None, keyid=None, profile=None):
    ### TODO - implement 'target_state={running, stopped}'
    '''
    Ensure an EC2 instance is running with the given attributes and state.
    name
        (string) - The name of the state definition.  Recommended that this
        match the instance_name attribute (generally the FQDN of the instance).
    instance_name
        (string) - The name of the instance, generally its FQDN.  Exclusive with
        'instance_id'.
    instance_id
        (string) - The ID of the instance (if known).  Exclusive with
        'instance_name'.
    image_id
        (string) – The ID of the AMI image to run.
    image_name
        (string) – The name of the AMI image to run.
    tags
        (dict) - Tags to apply to the instance.
    key_name
        (string) – The name of the key pair with which to launch instances.
    security_groups
        (list of strings) – The names of the EC2 classic security groups with
        which to associate instances
    user_data
        (string) – The Base64-encoded MIME user data to be made available to the
        instance(s) in this reservation.
    instance_type
        (string) – The EC2 instance size/type.  Note that only certain types are
        compatible with HVM based AMIs.
    placement
        (string) – The Availability Zone to launch the instance into.
    kernel_id
        (string) – The ID of the kernel with which to launch the instances.
    ramdisk_id
        (string) – The ID of the RAM disk with which to launch the instances.
    vpc_id
        (string) - The ID of a VPC to attach the instance to.
    vpc_name
        (string) - The name of a VPC to attach the instance to.
    monitoring_enabled
        (bool) – Enable detailed CloudWatch monitoring on the instance.
    subnet_id
        (string) – The ID of the subnet within which to launch the instances for
        VPC.
    subnet_name
        (string) – The name of the subnet within which to launch the instances
        for VPC.
    private_ip_address
        (string) – If you’re using VPC, you can optionally use this parameter to
        assign the instance a specific available IP address from the subnet
        (e.g., 10.0.0.25).
    block_device_map
        (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
        data structure describing the EBS volumes associated with the Image.
    disable_api_termination
        (bool) – If True, the instances will be locked and will not be able to
        be terminated via the API.
    instance_initiated_shutdown_behavior
        (string) – Specifies whether the instance stops or terminates on
        instance-initiated shutdown.  Valid values are:
        - 'stop'
        - 'terminate'
    placement_group
        (string) – If specified, this is the name of the placement group in
        which the instance(s) will be launched.
    client_token
        (string) – Unique, case-sensitive identifier you provide to ensure
        idempotency of the request.  Maximum 64 ASCII characters.
    security_group_ids
        (list of strings) – The IDs of the VPC security groups with which to
        associate instances.
    security_group_names
        (list of strings) – The names of the VPC security groups with which to
        associate instances.
    additional_info
        (string) – Specifies additional information to make available to the
        instance(s).
    tenancy
        (string) – The tenancy of the instance you want to launch.  An instance
        with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
        only be launched into a VPC.  Valid values are:”default” or “dedicated”.
        NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
    instance_profile_arn
        (string) – The Amazon resource name (ARN) of the IAM Instance Profile
        (IIP) to associate with the instances.
    instance_profile_name
        (string) – The name of the IAM Instance Profile (IIP) to associate with
        the instances.
    ebs_optimized
        (bool) – Whether the instance is optimized for EBS I/O.  This
        optimization provides dedicated throughput to Amazon EBS and a tuned
        configuration stack to provide optimal EBS I/O performance.  This
        optimization isn’t available with all instance types.
    network_interfaces
        (boto.ec2.networkinterface.NetworkInterfaceCollection) – A
        NetworkInterfaceCollection data structure containing the ENI
        specifications for the instance.
    network_interface_name
        (string) - The name of Elastic Network Interface to attach
        .. versionadded:: 2016.11.0
    network_interface_id
        (string) - The id of Elastic Network Interface to attach
        .. versionadded:: 2016.11.0
    attributes
        (dict) - Instance attributes and value to be applied to the instance.
        Available options are:
        - instanceType - A valid instance type (m1.small)
        - kernel - Kernel ID (None)
        - ramdisk - Ramdisk ID (None)
        - userData - Base64 encoded String (None)
        - disableApiTermination - Boolean (true)
        - instanceInitiatedShutdownBehavior - stop|terminate
        - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’]
        - sourceDestCheck - Boolean (true)
        - groupSet - Set of Security Groups or IDs
        - ebsOptimized - Boolean (false)
        - sriovNetSupport - String - ie: ‘simple’
    target_state
        (string) - The desired target state of the instance.  Available options
        are:
        - running
        - stopped
        Note that this option is currently UNIMPLEMENTED.
    public_ip:
        (string) - The IP of a previously allocated EIP address, which will be
        attached to the instance.  EC2 Classic instances ONLY - for VPC pass in
        an allocation_id instead.
    allocation_id:
        (string) - The ID of a previously allocated EIP address, which will be
        attached to the instance.  VPC instances ONLY - for Classic pass in
        a public_ip instead.
    allocate_eip:
        (bool) - Allocate and attach an EIP on-the-fly for this instance.  Note
        you'll want to release this address when terminating the instance,
        either manually or via the 'release_eip' flag to 'instance_absent'.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    .. versionadded:: 2016.3.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    _create = False
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
    changed_attrs = {}
    # Argument sanity checks: exactly one image identifier, and at most one
    # way of specifying an EIP.
    if not salt.utils.data.exactly_one((image_id, image_name)):
        raise SaltInvocationError('Exactly one of image_id OR '
                                  'image_name must be provided.')
    if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)):
        raise SaltInvocationError('At most one of public_ip, allocation_id OR '
                                  'allocate_eip may be provided.')
    # Phase 1: determine whether the instance already exists (by id or name).
    if instance_id:
        exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key,
                                             keyid=keyid, profile=profile, in_states=running_states)
        if not exists:
            _create = True
    else:
        instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name,
                                                        region=region, key=key, keyid=keyid, profile=profile,
                                                        in_states=running_states)
        if not instances:
            _create = True
        elif len(instances) > 1:
            log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id')
            instance_id = None  # No way to know, we'll just have to bail later....
        else:
            instance_id = instances[0]
    # Phase 2: create the instance if it does not exist (honoring test mode).
    if _create:
        if __opts__['test']:
            ret['comment'] = 'The instance {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        if image_name:
            args = {'ami_name': image_name, 'region': region, 'key': key,
                    'keyid': keyid, 'profile': profile}
            image_ids = __salt__['boto_ec2.find_images'](**args)
            if image_ids:
                image_id = image_ids[0]
            else:
                image_id = image_name
        r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name,
                                     tags=tags, key_name=key_name,
                                     security_groups=security_groups, user_data=user_data,
                                     instance_type=instance_type, placement=placement,
                                     kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id,
                                     vpc_name=vpc_name, monitoring_enabled=monitoring_enabled,
                                     subnet_id=subnet_id, subnet_name=subnet_name,
                                     private_ip_address=private_ip_address,
                                     block_device_map=block_device_map,
                                     disable_api_termination=disable_api_termination,
                                     instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
                                     placement_group=placement_group, client_token=client_token,
                                     security_group_ids=security_group_ids,
                                     security_group_names=security_group_names,
                                     additional_info=additional_info, tenancy=tenancy,
                                     instance_profile_arn=instance_profile_arn,
                                     instance_profile_name=instance_profile_name,
                                     ebs_optimized=ebs_optimized, network_interfaces=network_interfaces,
                                     network_interface_name=network_interface_name,
                                     network_interface_id=network_interface_id,
                                     region=region, key=key, keyid=keyid, profile=profile)
        if not r or 'instance_id' not in r:
            ret['result'] = False
            ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name)
            return ret
        instance_id = r['instance_id']
        ret['changes'] = {'old': {}, 'new': {}}
        ret['changes']['old']['instance_id'] = None
        ret['changes']['new']['instance_id'] = instance_id
        # To avoid issues we only allocate new EIPs at instance creation.
        # This might miss situations where an instance is initially created
        # without one and one is added later, but the alternative is the
        # risk of EIPs allocated at every state run.
        if allocate_eip:
            if __opts__['test']:
                ret['comment'] = 'New EIP would be allocated.'
                ret['result'] = None
                return ret
            domain = 'vpc' if vpc_id or vpc_name else None
            r = __salt__['boto_ec2.allocate_eip_address'](
                    domain=domain, region=region, key=key, keyid=keyid,
                    profile=profile)
            if not r:
                ret['result'] = False
                ret['comment'] = 'Failed to allocate new EIP.'
                return ret
            allocation_id = r['allocation_id']
            log.info("New EIP with address %s allocated.", r['public_ip'])
        else:
            log.info("EIP not requested.")
    # Phase 3: associate the requested/allocated EIP with the instance.
    if public_ip or allocation_id:
        # This can take a bit to show up, give it a chance to...
        tries = 10
        secs = 3
        for t in range(tries):
            r = __salt__['boto_ec2.get_eip_address_info'](
                    addresses=public_ip, allocation_ids=allocation_id,
                    region=region, key=key, keyid=keyid, profile=profile)
            if r:
                break
            else:
                log.info(
                    'Waiting up to %s secs for new EIP %s to become available',
                    tries * secs, public_ip or allocation_id
                )
                time.sleep(secs)
        if not r:
            ret['result'] = False
            ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id)
            return ret
        ip = r[0]['public_ip']
        if r[0].get('instance_id'):
            # The EIP is already bound somewhere; only an exact match is OK.
            if r[0]['instance_id'] != instance_id:
                ret['result'] = False
                ret['comment'] = ('EIP {0} is already associated with instance '
                                  '{1}.'.format(public_ip if public_ip else
                                  allocation_id, r[0]['instance_id']))
                return ret
        else:
            if __opts__['test']:
                ret['comment'] = 'Instance {0} to be updated.'.format(name)
                ret['result'] = None
                return ret
            r = __salt__['boto_ec2.associate_eip_address'](
                    instance_id=instance_id, public_ip=public_ip,
                    allocation_id=allocation_id, region=region, key=key,
                    keyid=keyid, profile=profile)
            if r:
                if 'new' not in ret['changes']:
                    ret['changes']['new'] = {}
                ret['changes']['new']['public_ip'] = ip
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to attach EIP to instance {0}.'.format(
                    instance_name if instance_name else name)
                return ret
    # Phase 4: reconcile requested instance attributes with current values.
    if attributes:
        for k, v in six.iteritems(attributes):
            curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key,
                                                      keyid=keyid, profile=profile)
            curr = {} if not isinstance(curr, dict) else curr
            if curr.get(k) == v:
                continue
            else:
                if __opts__['test']:
                    changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format(
                        k, curr.get(k), v)
                    continue
                try:
                    r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v,
                                                           instance_id=instance_id, region=region,
                                                           key=key, keyid=keyid, profile=profile)
                except SaltInvocationError as e:
                    ret['result'] = False
                    ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name)
                    return ret
                ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}}
                ret['changes']['old'][k] = curr.get(k)
                ret['changes']['new'][k] = v
    if __opts__['test']:
        if changed_attrs:
            ret['changes']['new'] = changed_attrs
            ret['result'] = None
        else:
            ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name)
            ret['result'] = True
    # Phase 5: reconcile tags (only possible when a single instance_id is known).
    if tags and instance_id is not None:
        tags = dict(tags)
        curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id},
                                region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {}))
        current = set(curr_tags.keys())
        desired = set(tags.keys())
        remove = list(current - desired)  # Boto explicitly requires a list here and can't cope with a set...
        add = dict([(t, tags[t]) for t in desired - current])
        replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)])
        # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative.
        add.update(replace)
        if add or remove:
            if __opts__['test']:
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
                ret['comment'] += '  Tags would be updated on instance {0}.'.format(instance_name if
                                                                                    instance_name else name)
            else:
                if remove:
                    if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while deleting tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += '  ' + msg
                        ret['result'] = False
                        return ret
                if add:
                    if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while creating tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += '  ' + msg
                        ret['result'] = False
                        return ret
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
    return ret | Ensure an EC2 instance is running with the given attributes and state.
name
(string) - The name of the state definition. Recommended that this
match the instance_name attribute (generally the FQDN of the instance).
instance_name
(string) - The name of the instance, generally its FQDN. Exclusive with
'instance_id'.
instance_id
(string) - The ID of the instance (if known). Exclusive with
'instance_name'.
image_id
(string) – The ID of the AMI image to run.
image_name
(string) – The name of the AMI image to run.
tags
(dict) - Tags to apply to the instance.
key_name
(string) – The name of the key pair with which to launch instances.
security_groups
(list of strings) – The names of the EC2 classic security groups with
which to associate instances
user_data
(string) – The Base64-encoded MIME user data to be made available to the
instance(s) in this reservation.
instance_type
(string) – The EC2 instance size/type. Note that only certain types are
compatible with HVM based AMIs.
placement
(string) – The Availability Zone to launch the instance into.
kernel_id
(string) – The ID of the kernel with which to launch the instances.
ramdisk_id
(string) – The ID of the RAM disk with which to launch the instances.
vpc_id
(string) - The ID of a VPC to attach the instance to.
vpc_name
(string) - The name of a VPC to attach the instance to.
monitoring_enabled
(bool) – Enable detailed CloudWatch monitoring on the instance.
subnet_id
(string) – The ID of the subnet within which to launch the instances for
VPC.
subnet_name
(string) – The name of the subnet within which to launch the instances
for VPC.
private_ip_address
(string) – If you’re using VPC, you can optionally use this parameter to
assign the instance a specific available IP address from the subnet
(e.g., 10.0.0.25).
block_device_map
(boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
data structure describing the EBS volumes associated with the Image.
disable_api_termination
(bool) – If True, the instances will be locked and will not be able to
be terminated via the API.
instance_initiated_shutdown_behavior
(string) – Specifies whether the instance stops or terminates on
instance-initiated shutdown. Valid values are:
- 'stop'
- 'terminate'
placement_group
(string) – If specified, this is the name of the placement group in
which the instance(s) will be launched.
client_token
(string) – Unique, case-sensitive identifier you provide to ensure
idempotency of the request. Maximum 64 ASCII characters.
security_group_ids
(list of strings) – The IDs of the VPC security groups with which to
associate instances.
security_group_names
(list of strings) – The names of the VPC security groups with which to
associate instances.
additional_info
(string) – Specifies additional information to make available to the
instance(s).
tenancy
(string) – The tenancy of the instance you want to launch. An instance
with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
only be launched into a VPC. Valid values are:”default” or “dedicated”.
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
instance_profile_arn
(string) – The Amazon resource name (ARN) of the IAM Instance Profile
(IIP) to associate with the instances.
instance_profile_name
(string) – The name of the IAM Instance Profile (IIP) to associate with
the instances.
ebs_optimized
(bool) – Whether the instance is optimized for EBS I/O. This
optimization provides dedicated throughput to Amazon EBS and a tuned
configuration stack to provide optimal EBS I/O performance. This
optimization isn’t available with all instance types.
network_interfaces
(boto.ec2.networkinterface.NetworkInterfaceCollection) – A
NetworkInterfaceCollection data structure containing the ENI
specifications for the instance.
network_interface_name
(string) - The name of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
network_interface_id
(string) - The id of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
attributes
(dict) - Instance attributes and value to be applied to the instance.
Available options are:
- instanceType - A valid instance type (m1.small)
- kernel - Kernel ID (None)
- ramdisk - Ramdisk ID (None)
- userData - Base64 encoded String (None)
- disableApiTermination - Boolean (true)
- instanceInitiatedShutdownBehavior - stop|terminate
- blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’]
- sourceDestCheck - Boolean (true)
- groupSet - Set of Security Groups or IDs
- ebsOptimized - Boolean (false)
- sriovNetSupport - String - ie: ‘simple’
target_state
(string) - The desired target state of the instance. Available options
are:
- running
- stopped
Note that this option is currently UNIMPLEMENTED.
public_ip:
(string) - The IP of a previously allocated EIP address, which will be
attached to the instance. EC2 Classic instances ONLY - for VPC pass in
an allocation_id instead.
allocation_id:
(string) - The ID of a previously allocated EIP address, which will be
attached to the instance. VPC instances ONLY - for Classic pass in
a public_ip instead.
allocate_eip:
(bool) - Allocate and attach an EIP on-the-fly for this instance. Note
you'll want to release this address when terminating the instance,
either manually or via the 'release_eip' flag to 'instance_absent'.
region
(string) - Region to connect to.
key
(string) - Secret key to be used.
keyid
(string) - Access key to be used.
profile
(variable) - A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0 |
def status_for_all_orders(self):
    """Status for all orders
    https://starfighter.readme.io/docs/status-for-all-orders
    """
    # Build the venue/account-scoped endpoint and resolve it against base_url.
    endpoint = 'venues/{venue}/accounts/{account}/orders'.format(
        venue=self.venue,
        account=self.account,
    )
    response = self.session.get(urljoin(self.base_url, endpoint))
    return response.json()
https://starfighter.readme.io/docs/status-for-all-orders |
def with_path(self, path, *, encoded=False):
    """Return a new URL with path replaced."""
    # Quote the path unless the caller guarantees it is already encoded.
    new_path = path if encoded else self._PATH_QUOTER(path)
    if self.is_absolute():
        new_path = self._normalize_path(new_path)
    # Non-empty paths must be rooted.
    if new_path and not new_path.startswith("/"):
        new_path = "/" + new_path
    # Replacing the path invalidates any existing query and fragment.
    return URL(self._val._replace(path=new_path, query="", fragment=""), encoded=True)
def eventFilter(self, object, event):
    """
    Filters the chart widget for resize events, clamping the scene rect
    to the configured minimum/maximum dimensions and enabling scroll bars
    whenever clamping occurs.

    :param      object | <QObject>
                event  | <QEvent>

    :return     <bool> | always False so the event continues to propagate
    """
    if event.type() != event.Resize:
        return False

    size = event.size()
    w = size.width()
    h = size.height()
    hpolicy = Qt.ScrollBarAlwaysOff
    vpolicy = Qt.ScrollBarAlwaysOff

    # Clamp the height into [minimumHeight, maximumHeight] (-1 disables a
    # bound); show the vertical scroll bar when the widget cannot fit.
    if self._minimumHeight != -1 and h < self._minimumHeight:
        h = self._minimumHeight
        vpolicy = Qt.ScrollBarAsNeeded
    if self._maximumHeight != -1 and self._maximumHeight < h:
        h = self._maximumHeight
        vpolicy = Qt.ScrollBarAsNeeded

    # Same clamping for the width against the horizontal bounds.
    if self._minimumWidth != -1 and w < self._minimumWidth:
        w = self._minimumWidth
        hpolicy = Qt.ScrollBarAsNeeded
    if self._maximumWidth != -1 and self._maximumWidth < w:
        w = self._maximumWidth
        hpolicy = Qt.ScrollBarAsNeeded

    # NOTE(review): the previous revision queried the rulers' minimum
    # lengths (computing the vertical length from the *horizontal* ruler,
    # a likely copy-paste bug) but the results only fed a commented-out
    # block and zero offsets, so the dead computation has been removed.
    self.setSceneRect(0, 0, w, h)
    object.setVerticalScrollBarPolicy(vpolicy)
    object.setHorizontalScrollBarPolicy(hpolicy)
    return False
rect.
:param object | <QObject>
event | <QEvent> |
def dropHistoricalTable(apps, schema_editor):
    """
    Drops the historical sap_success_factors table named herein.

    :param apps: the migration app registry (unused, required by RunPython)
    :param schema_editor: the active database schema editor
    """
    table_name = 'sap_success_factors_historicalsapsuccessfactorsenterprisecus80ad'
    if table_name in connection.introspection.table_names():
        # Bug fix: the previous code merely *instantiated*
        # migrations.DeleteModel, which is a no-op inside a RunPython
        # callback -- migration operations only take effect when executed
        # by the migration executor. Drop the table directly through the
        # schema editor, which also applies backend-correct identifier
        # quoting.
        schema_editor.execute(
            'DROP TABLE {}'.format(schema_editor.quote_name(table_name))
        )
def update_os_image_from_image_reference(self, image_name, os_image):
'''
Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os.image.media_link:
Required: Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('image_name', image_name)
_validate_not_none('os_image', os_image)
return self._perform_put(self._get_image_path(image_name),
_XmlSerializer.update_os_image_to_xml(os_image), as_async=True
) | Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os.image.media_link:
Required: Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows |
def _get_asset(self, asset_uid):
"""
Returns raw response for an given asset by its unique id.
"""
uri = self.uri + '/v2/assets/' + asset_uid
headers = self._get_headers()
return self.service._get(uri, headers=headers) | Returns raw response for an given asset by its unique id. |
def is_type(self):
"""
:return:
:rtype: bool
"""
if self.__is_type_result is not None:
return self.__is_type_result
self.__is_type_result = self.__is_type()
return self.__is_type_result | :return:
:rtype: bool |
def update_rejection_permissions(portal):
"""Adds the permission 'Reject Analysis Request' and update the permission
mappings accordingly """
updated = update_rejection_permissions_for(portal, "bika_ar_workflow",
"Reject Analysis Request")
if updated:
brains = api.search(dict(portal_type="AnalysisRequest"),
CATALOG_ANALYSIS_REQUEST_LISTING)
update_rolemappings_for(brains, "bika_ar_workflow")
updated = update_rejection_permissions_for(portal, "bika_sample_workflow",
"Reject Sample")
if updated:
brains = api.search(dict(portal_type="Sample"), "bika_catalog")
update_rolemappings_for(brains, "bika_sample_workflow") | Adds the permission 'Reject Analysis Request' and update the permission
mappings accordingly |
def pos_development_directory(templates,
inventory,
context,
topics,
user,
item):
"""Return absolute path to development directory
Arguments:
templates (dict): templates.yaml
inventory (dict): inventory.yaml
context (dict): The be context, from context()
topics (list): Arguments to `in`
user (str): Current `be` user
item (str): Item from template-binding address
"""
replacement_fields = replacement_fields_from_context(context)
binding = binding_from_item(inventory, item)
pattern = pattern_from_template(templates, binding)
positional_arguments = find_positional_arguments(pattern)
highest_argument = find_highest_position(positional_arguments)
highest_available = len(topics) - 1
if highest_available < highest_argument:
echo("Template for \"%s\" requires at least %i arguments" % (
item, highest_argument + 1))
sys.exit(USER_ERROR)
try:
return pattern.format(*topics, **replacement_fields).replace("\\", "/")
except KeyError as exc:
echo("TEMPLATE ERROR: %s is not an available key\n" % exc)
echo("Available tokens:")
for key in replacement_fields:
echo("\n- %s" % key)
sys.exit(TEMPLATE_ERROR) | Return absolute path to development directory
Arguments:
templates (dict): templates.yaml
inventory (dict): inventory.yaml
context (dict): The be context, from context()
topics (list): Arguments to `in`
user (str): Current `be` user
item (str): Item from template-binding address |
def detect_images_and_galleries(generators):
"""Runs generator on both pages and articles."""
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in itertools.chain(generator.articles, generator.translations, generator.drafts):
detect_image(generator, article)
detect_gallery(generator, article)
elif isinstance(generator, PagesGenerator):
for page in itertools.chain(generator.pages, generator.translations, generator.hidden_pages):
detect_image(generator, page)
detect_gallery(generator, page) | Runs generator on both pages and articles. |
def invert(self):
'''
Invert by swapping each value with its key.
Returns
-------
MultiDict
Inverted multi-dict.
Examples
--------
>>> MultiDict({1: {1}, 2: {1,2,3}}, 4: {}).invert()
MultiDict({1: {1,2}, 2: {2}, 3: {2}})
'''
result = defaultdict(set)
for k, val in self.items():
result[val].add(k)
return MultiDict(dict(result)) | Invert by swapping each value with its key.
Returns
-------
MultiDict
Inverted multi-dict.
Examples
--------
>>> MultiDict({1: {1}, 2: {1,2,3}}, 4: {}).invert()
MultiDict({1: {1,2}, 2: {2}, 3: {2}}) |
def select(self, template_name):
"""
Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
"""
return [t for t in self.templates if t.name == template_name][0] | Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s |
def applies(self, src, dst):
"""Checks if this rule applies to the given src and dst paths, based on the src pattern and
dst pattern given in the constructor.
If src pattern was None, this rule will apply to any given src path (same for dst).
"""
if self._src_pattern and (src is None or re.search(self._src_pattern, src) is None):
return False
elif self._dst_pattern and (dst is None or re.search(self._dst_pattern, dst) is None):
return False
return True | Checks if this rule applies to the given src and dst paths, based on the src pattern and
dst pattern given in the constructor.
If src pattern was None, this rule will apply to any given src path (same for dst). |
def get_account(self):
"""Get details of the current account.
:returns: an account object.
:rtype: Account
"""
api = self._get_api(iam.DeveloperApi)
return Account(api.get_my_account_info(include="limits, policies")) | Get details of the current account.
:returns: an account object.
:rtype: Account |
def container_query(self, query, quiet=False):
'''search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
'''
results = self._list_containers()
matches = []
for result in results:
for key,val in result.metadata.items():
if query in val and result not in matches:
matches.append(result)
if not quiet:
bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches)))
for image in matches:
size = round(image.size / (1024*1024.0))
bot.custom(prefix=image.name, color="CYAN")
bot.custom(prefix='id: ', message=image.id)
bot.custom(prefix='uri: ', message=image.metadata['name'])
bot.custom(prefix='updated:', message=image.updated)
bot.custom(prefix='size: ', message=' %s MB' %(size))
bot.custom(prefix='md5: ', message=image.md5_hash)
if "public_url" in image.metadata:
public_url = image.metadata['public_url']
bot.custom(prefix='url: ', message=public_url)
bot.newline()
return matches | search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query) |
def _classic_get_grouped_dicoms(dicom_input):
"""
Search all dicoms in the dicom directory, sort and validate them
fast_read = True will only read the headers not the data
"""
# Loop overall files and build dict
# Order all dicom files by InstanceNumber
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
# now group per stack
grouped_dicoms = []
# loop over all sorted dicoms
stack_position_tag = Tag(0x0020, 0x0012) # in this case it is the acquisition number
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
if stack_position_tag not in dicom_:
stack_index = 0
else:
stack_index = dicom_[stack_position_tag].value - 1
while len(grouped_dicoms) <= stack_index:
grouped_dicoms.append([])
grouped_dicoms[stack_index].append(dicom_)
return grouped_dicoms | Search all dicoms in the dicom directory, sort and validate them
fast_read = True will only read the headers not the data |
def stage(self, name, pipeline_counter=None):
"""Helper to instantiate a :class:`gocd.api.stage.Stage` object
Args:
name: The name of the stage
pipeline_counter:
Returns:
"""
return Stage(
self.server,
pipeline_name=self.name,
stage_name=name,
pipeline_counter=pipeline_counter,
) | Helper to instantiate a :class:`gocd.api.stage.Stage` object
Args:
name: The name of the stage
pipeline_counter:
Returns: |
def watch_docs(ctx):
"""Run build the docs when a file changes."""
try:
import sphinx_autobuild # noqa
except ImportError:
print('ERROR: watch task requires the sphinx_autobuild package.')
print('Install it with:')
print(' pip install sphinx-autobuild')
sys.exit(1)
docs(ctx)
ctx.run('sphinx-autobuild {} {}'.format(docs_dir, build_dir), pty=True) | Run build the docs when a file changes. |
def format_time(time):
""" Formats the given time into HH:MM:SS """
h, r = divmod(time / 1000, 3600)
m, s = divmod(r, 60)
return "%02d:%02d:%02d" % (h, m, s) | Formats the given time into HH:MM:SS |
def _show_notification(self,
event, summary, message, icon,
*actions):
"""
Show a notification.
:param str event: event name
:param str summary: notification title
:param str message: notification body
:param str icon: icon name
:param actions: each item is a tuple with parameters for _add_action
"""
notification = self._notify(summary, message, icon)
timeout = self._get_timeout(event)
if timeout != -1:
notification.set_timeout(int(timeout * 1000))
for action in actions:
if action and self._action_enabled(event, action[0]):
self._add_action(notification, *action)
try:
notification.show()
except GLib.GError as exc:
# Catch and log the exception. Starting udiskie with notifications
# enabled while not having a notification service installed is a
# mistake too easy to be made, but it shoud not render the rest of
# udiskie's logic useless by raising an exception before the
# automount handler gets invoked.
self._log.error(_("Failed to show notification: {0}", exc_message(exc)))
self._log.debug(format_exc()) | Show a notification.
:param str event: event name
:param str summary: notification title
:param str message: notification body
:param str icon: icon name
:param actions: each item is a tuple with parameters for _add_action |
def register_app(self, app):
"""Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes)
"""
app.route(self.uri, methods=self.methods)(self.callable_obj)
return self | Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes) |
def add_scalar(self, name, value, step):
    """Log a scalar variable.

    :param name: tag under which the scalar is recorded
    :param value: scalar value to log
    :param step: global step (x-axis position) for this sample
    """
    self.writer.add_scalar(name, value, step) | Log a scalar variable.
def timezone(client, location, timestamp=None, language=None):
"""Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict
"""
params = {
"location": convert.latlng(location),
"timestamp": convert.time(timestamp or datetime.utcnow())
}
if language:
params["language"] = language
return client._request( "/maps/api/timezone/json", params) | Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict |
def qtrim_front(self, name, size=1):
    """
    Remove up to ``size`` elements from the front (head) of the queue.

    NOTE(review): the previous docstring ("Sets the list element at
    ``index`` to ``value``...") was copy-pasted from a different method
    and did not describe this one, which issues the ``qtrim_front``
    command.

    :param string name: the queue name
    :param int size: the max length of removed elements
    :return: the length of removed elements
    :rtype: int
    """
    # reject zero/negative sizes before hitting the server
    size = get_positive_integer("size", size)
    return self.execute_command('qtrim_front', name, size) | Sets the list element at ``index`` to ``value``. An error is returned for out of
range indexes.
:param string name: the queue name
:param int size: the max length of removed elements
:return: the length of removed elements
:rtype: int |
def expand(self, v):
"""Calculates the differences between a series of given measure values:
it calculates baseline values from position values.
:params v: a measure (of type 'baseline', 'position' or 'uvw')
:returns: a `dict` with the value for key `measures` being a measure
and the value for key `xyz` a quantity containing the
differences.
Example::
>>> from casacore.quanta import quantity
>>> x = quantity([10,50],'m')
>>> y = quantity([20,100],'m')
>>> z = quantity([30,150],'m')
>>> sb = dm.baseline('itrf', x, y, z)
>>> out = dm.expand(sb)
>>> print out['xyz']
[40.000000000000014, 80.0, 120.0] m
"""
if not is_measure(v) or v['type'] not in ['baseline',
'position', 'uvw']:
raise TypeError("Can only expand baselines, positions, or uvw")
vw = v.copy()
vw['type'] = "uvw"
vw['refer'] = "J2000"
outm = _measures.expand(self, vw)
outm['xyz'] = dq.quantity(outm['xyz'])
outm['measure']['type'] = v['type']
outm['measure']['refer'] = v['refer']
return outm | Calculates the differences between a series of given measure values:
it calculates baseline values from position values.
:params v: a measure (of type 'baseline', 'position' or 'uvw')
:returns: a `dict` with the value for key `measures` being a measure
and the value for key `xyz` a quantity containing the
differences.
Example::
>>> from casacore.quanta import quantity
>>> x = quantity([10,50],'m')
>>> y = quantity([20,100],'m')
>>> z = quantity([30,150],'m')
>>> sb = dm.baseline('itrf', x, y, z)
>>> out = dm.expand(sb)
>>> print out['xyz']
[40.000000000000014, 80.0, 120.0] m |
def volatility(tnet, distance_func_name='default', calc='global', communities=None, event_displacement=None):
    r"""
    Volatility of temporal networks.

    Volatility is the average distance between consecutive time points of graphlets (difference is calculated either globally or per edge).

    Parameters
    ----------

    tnet : array or dict
        temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'

    D : str
        Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).

    calc : str
        Version of volatility to calculate. Possibilities include:
        'global' - (default): the average distance of all nodes for each consecutive time point).
        'edge' - average distance between consecutive time points for each edge). Takes considerably longer
        'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
        'time' - returns volatility per time point
        'communities' - returns volatility per community id (see communities). Also is returned per time-point and this may be changed in the future (with additional options)
        'event_displacement' - calculates the volatility from a specified point. Returns time-series.

    communities : array
        Array of indices for community (either (node) or (node,time) dimensions).

    event_displacement : int
        if calc = event_displacement specify the temporal index where all other time-points are calculated in relation too.

    Notes
    -----

    Volatility calculates the difference between network snapshots.

    .. math:: V_t = D(G_t,G_{t+1})

    Where D is some distance function (e.g. Hamming distance for binary matrices).

    V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.

    Index of communities are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returning V[1,2] will correspond indexes 1 and 2. And missing index (e.g. here 0 and 4 will be NANs in rows and columns). If this behaviour is unwanted, call clean_community_indexes first. This will probably change.

    Examples
    --------

    Import everything needed.

    >>> import teneto
    >>> import numpy as np
    >>> np.random.seed(1)
    >>> tnet = teneto.TemporalNetwork(nettype='bu')

    Here we generate a binary network where edges have a 0.5 chance of going "on", and once on a 0.2 chance to go "off"

    >>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))

    Calculate the volatility

    >>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
    0.5555555555555556

    If we change the probabilities so that edges disappear the time-point after they appeared:

    >>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))

    This will make a more volatile network

    >>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
    0.1111111111111111

    We can calculate the volatility per time instead

    >>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
    >>> len(vol_time)
    9
    >>> vol_time[0]
    0.3333333333333333

    Or per node:

    >>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
    >>> vol_node
    array([0.07407407, 0.07407407, 0.07407407])

    Here we see the volatility for each node was the same.

    It is also possible to pass a community vector and the function will return volatility both within and between each community.
    So the following has two communities:

    >>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
    >>> vol_com.shape
    (2, 2, 9)
    >>> vol_com[:,:,0]
    array([[nan, 0.5],
           [0.5, 0. ]])

    And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.

    Output
    ------
    vol : array
    """
    # Get input (C or G)
    tnet, netinfo = process_input(tnet, ['C', 'G', 'TN'])
    distance_func_name = check_distance_funciton_input(
        distance_func_name, netinfo)
    if not isinstance(distance_func_name, str):
        raise ValueError('Distance metric must be a string')
    # If not directional, only calc on the uppertriangle
    # NOTE(review): for directed nets k=-N selects *all* index pairs,
    # including the diagonal — presumably intentional, but worth confirming.
    if netinfo['nettype'][1] == 'd':
        ind = np.triu_indices(tnet.shape[0], k=-tnet.shape[0])
    elif netinfo['nettype'][1] == 'u':
        ind = np.triu_indices(tnet.shape[0], k=1)
    if calc == 'communities':
        # Make sure communities is np array for indexing later on.
        communities = np.array(communities)
        if len(communities) != netinfo['netshape'][0]:
            raise ValueError(
                'When processing per network, communities vector must equal the number of nodes')
        if communities.min() < 0:
            raise ValueError(
                'Communitiy assignments must be positive integers')
    # Get chosen distance metric function
    distance_func = getDistanceFunction(distance_func_name)
    if calc == 'global':
        # mean distance between each pair of consecutive snapshots
        vol = np.mean([distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
                       for t in range(0, tnet.shape[-1] - 1)])
    elif calc == 'time':
        vol = [distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
               for t in range(0, tnet.shape[-1] - 1)]
    elif calc == 'event_displacement':
        # every time-point compared against the fixed reference time-point
        vol = [distance_func(tnet[ind[0], ind[1], event_displacement],
                             tnet[ind[0], ind[1], t]) for t in range(0, tnet.shape[-1])]
    # This takes quite a bit of time to loop through. When calculating per edge/node.
    elif calc == 'edge' or calc == 'node':
        # NOTE(review): this iterates the Cartesian product of ind[0] x
        # ind[1] rather than the paired (i, j) indices from triu_indices —
        # looks like it should be zip(ind[0], ind[1]); confirm upstream.
        vol = np.zeros([tnet.shape[0], tnet.shape[1]])
        for i in ind[0]:
            for j in ind[1]:
                vol[i, j] = np.mean([distance_func(
                    tnet[i, j, t], tnet[i, j, t + 1]) for t in range(0, tnet.shape[-1] - 1)])
        if netinfo['nettype'][1] == 'u':
            vol = vol + np.transpose(vol)
        if calc == 'node':
            vol = np.mean(vol, axis=1)
    elif calc == 'communities':
        net_id = set(communities)
        vol = np.zeros([max(net_id) + 1, max(net_id) +
                        1, netinfo['netshape'][-1] - 1])
        for net1 in net_id:
            for net2 in net_id:
                if net1 != net2:
                    # between-community block: all cross pairs
                    vol[net1, net2, :] = [distance_func(tnet[communities == net1][:, communities == net2, t].flatten(),
                                                        tnet[communities == net1][:, communities == net2, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
                else:
                    # within-community block: upper triangle only
                    nettmp = tnet[communities ==
                                  net1][:, communities == net2, :]
                    triu = np.triu_indices(nettmp.shape[0], k=1)
                    nettmp = nettmp[triu[0], triu[1], :]
                    vol[net1, net2, :] = [distance_func(nettmp[:, t].flatten(
                    ), nettmp[:, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
    elif calc == 'withincommunities':
        withi = np.array([[ind[0][n], ind[1][n]] for n in range(
            0, len(ind[0])) if communities[ind[0][n]] == communities[ind[1][n]]])
        vol = [distance_func(tnet[withi[:, 0], withi[:, 1], t], tnet[withi[:, 0],
                                                                     withi[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
    elif calc == 'betweencommunities':
        beti = np.array([[ind[0][n], ind[1][n]] for n in range(
            0, len(ind[0])) if communities[ind[0][n]] != communities[ind[1][n]]])
        vol = [distance_func(tnet[beti[:, 0], beti[:, 1], t], tnet[beti[:, 0],
                                                                   beti[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
    return vol | r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
D : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point).
'edge' - average distance between consecutive time points for each edge). Takes considerably longer
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point
'communities' - returns volatility per communitieswork id (see communities). Also is returned per time-point and this may be changed in the future (with additional options)
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of indices for community (either (node) or (node,time) dimensions).
event_displacement : int
if calc = event_displacement specify the temporal index where all other time-points are calculated in relation too.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Index of communities are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returning V[1,2] will correspond indexes 1 and 2. And missing index (e.g. here 0 and 4 will be NANs in rows and columns). If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 change of going "on", and once on a 0.2 change to go "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array |
def getPointOnLine(x1, y1, x2, y2, n):
"""Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
"""
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y) | Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module. |
def checkBim(fileName, minNumber, chromosome):
"""Checks the BIM file for chrN markers.
:param fileName:
:param minNumber:
:param chromosome:
:type fileName: str
:type minNumber: int
:type chromosome: str
:returns: ``True`` if there are at least ``minNumber`` markers on
chromosome ``chromosome``, ``False`` otherwise.
"""
nbMarkers = 0
with open(fileName, 'r') as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split("\t")
if row[0] == chromosome:
nbMarkers += 1
if nbMarkers < minNumber:
return False
return True | Checks the BIM file for chrN markers.
:param fileName:
:param minNumber:
:param chromosome:
:type fileName: str
:type minNumber: int
:type chromosome: str
:returns: ``True`` if there are at least ``minNumber`` markers on
chromosome ``chromosome``, ``False`` otherwise. |
def can_take(attrs_to_freeze=(), defaults=None, source_attr='source', instance_property_name='snapshot', inner_class_name='Snapshot'):
    """
    Decorator to make a class allow their instances to generate
    snapshot of themselves.

    Decorates the class by allowing it to have:
    * A custom class to serve each snapshot. Such class
      will have a subset of attributes to serve from the object,
      and a special designed attribute ('source', by default) to
      serve the originating object. Such class will be stored
      under custom name under the generating (decorated) class.
    * An instance method (actually: property) which will yield
      the snapshot for the instance.

    `defaults`, when callable, maps an attribute name to a fallback value
    used if the instance lacks that attribute; when not callable, missing
    attributes raise AttributeError as usual.
    """
    def wrapper(klass):
        # immutable snapshot type: the frozen attrs plus the source slot
        Snapshot = namedtuple(inner_class_name, tuple(attrs_to_freeze) + (source_attr,))
        doc = """
        From the current instance collects the following attributes:
        %s

        Additionally, using the attribute '%s', collects a reference
        to the current instance.
        """ % (', '.join(attrs_to_freeze), source_attr)
        def instance_method(self):
            # defaults(k) is consulted only when `defaults` is callable;
            # otherwise a missing attribute raises AttributeError
            return Snapshot(**dict({
                k: (getattr(self, k, defaults(k)) if callable(defaults) else getattr(self, k)) for k in attrs_to_freeze
            }, **{source_attr: self}))
        instance_method.__doc__ = doc
        # expose the snapshot as a read-only property and publish the
        # snapshot class itself on the decorated class
        setattr(klass, instance_property_name, property(instance_method))
        setattr(klass, inner_class_name, Snapshot)
        return klass
    return wrapper | Decorator to make a class allow their instances to generate
snapshot of themselves.
Decorates the class by allowing it to have:
* A custom class to serve each snapshot. Such class
will have a subset of attributes to serve from the object,
and a special designed attribute ('source', by default) to
serve the originating object. Such class will be stored
under custom name under the generating (decorated) class.
* An instance method (actually: property) which will yield
the snapshot for the instance. |
def show_warning(self, index):
    """
    Return True if a warning should be shown before displaying the
    variable behind the given table-model *index*.

    Uses the size/type metadata already computed by the TableModel
    (``index.model().sizes`` / ``index.model().types``) instead of
    fetching the variable's value, because retrieving a very large
    value just to inspect it can take a long time.

    :param index: model index pointing at the variable's row.
    :return: True for a container type ('list', 'set', 'tuple', 'dict')
        with more than 1e5 elements; False otherwise, including when the
        metadata is unavailable.
    """
    try:
        val_size = index.model().sizes[index.row()]
        val_type = index.model().types[index.row()]
    except Exception:
        # Metadata missing or malformed: be permissive and show no warning.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return False
    return val_type in ('list', 'set', 'tuple', 'dict') and int(val_size) > 1e5
a big variable associated to a Tablemodel index
This avoids getting the variables' value to know its
size and type, using instead those already computed by
the TableModel.
The problem is when a variable is too big, it can take a
lot of time just to get its value |
def add_group(self, number, name, led_type):
    """Create a group, append it to this object's group list and return it.

    :param number: Group number (1-4).
    :param name: Group name.
    :param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
    :returns: Added group.
    """
    new_group = group_factory(self, number, name, led_type)
    self.groups.append(new_group)
    return new_group
:param number: Group number (1-4).
:param name: Group name.
:param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
:returns: Added group. |
def __parse(value):
    """
    Parse the string datetime.

    Supports the subset of ISO8601 used by xsd:dateTime, but is lenient
    with what is accepted, handling most reasonable syntax.

    Subsecond information is rounded to microseconds due to a restriction
    in the python datetime.datetime/time implementation.

    @param value: A datetime string.
    @type value: str
    @return: A datetime object.
    @rtype: B{datetime}.I{datetime}
    @raise ValueError: if *value* does not match the expected format.
    """
    match_result = _RE_DATETIME.match(value)
    if match_result is None:
        raise ValueError("date data has invalid format '%s'" % (value,))
    # Assemble the date, time (plus a rounding flag) and timezone parts
    # separately from the regex match.
    date = _date_from_match(match_result)
    time, round_up = _time_from_match(match_result)
    tzinfo = _tzinfo_from_match(match_result)
    value = datetime.datetime.combine(date, time)
    value = value.replace(tzinfo=tzinfo)
    # _time_from_match truncated to microseconds; when it flagged that the
    # dropped digits should round up, add one microsecond back.
    if round_up:
        value += datetime.timedelta(microseconds=1)
return value | Parse the string datetime.
Supports the subset of ISO8601 used by xsd:dateTime, but is lenient
with what is accepted, handling most reasonable syntax.
Subsecond information is rounded to microseconds due to a restriction
in the python datetime.datetime/time implementation.
@param value: A datetime string.
@type value: str
@return: A datetime object.
@rtype: B{datetime}.I{datetime} |
def get_instance(self, payload):
    """
    Build an instance of SipInstance from an API response payload.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.sip.SipInstance
    :rtype: twilio.rest.api.v2010.account.sip.SipInstance
    """
    account_sid = self._solution['account_sid']
    return SipInstance(self._version, payload, account_sid=account_sid)
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.SipInstance
:rtype: twilio.rest.api.v2010.account.sip.SipInstance |
def _initialize_installation(self):
    """
    Register a new installation with the server and store the resulting
    context (token, client private key, server public key) on this object.

    :rtype: None
    """
    # Fresh RSA key pair; only the public half is sent to the server.
    private_key_client = security.generate_rsa_private_key()
    installation = core.Installation.create(
        self,
        security.public_key_to_string(private_key_client.publickey())
    ).value
    token = installation.token.token
    public_key_server_string = \
        installation.server_public_key.server_public_key
    # Parse the string-encoded server key into an RSA key object.
    public_key_server = RSA.import_key(public_key_server_string)
    self._installation_context = InstallationContext(
        token,
        private_key_client,
        public_key_server
) | :rtype: None |
def initialize(self, **kwargs):
    """
    Accept the initialization data a transfer function may need before
    it can modify a supplied numpy array in place — for example, state
    that must be allocated with a certain size, or the coordinate system
    associated with the input data.

    Raises an Exception unless every key listed in ``self.init_keys`` is
    present in ``kwargs``.
    """
    supplied = set(kwargs)
    if not supplied.issuperset(self.init_keys):
        required = ','.join(repr(el) for el in self.init_keys)
        raise Exception("TransferFn needs to be initialized with %s" % required)
supplied numpy array can be modified in place. For instance,
transfer functions may have state which needs to be allocated
in memory with a certain size. In other cases, the transfer
function may need to know about the coordinate system
associated with the input data. |
def _run_coro(self, value):
    """Start the wrapped coroutine as an asyncio task for *value*.

    In ``MODE.LAST_DISTINCT`` the coroutine is only started when *value*
    differs from the previously emitted one.
    """
    # when LAST_DISTINCT is used only start coroutine when value changed
    if self._options.mode is MODE.LAST_DISTINCT and \
        value == self._last_emit:
        self._future = None
        return
    # store the value to be emitted for LAST_DISTINCT
    self._last_emit = value
    # publish the start of the coroutine
    self.scheduled.notify(value)
    # build the coroutine; when `unpack` is set, `value` itself supplies
    # the positional arguments, otherwise it is passed as a single one
    values = value if self._options.unpack else (value,)
    coro = self._options.coro(*values, *self._options.args,
                              **self._options.kwargs)
    # create a task out of it and add ._future_done as callback
    self._future = asyncio.ensure_future(coro)
self._future.add_done_callback(self._future_done) | Start the coroutine as task |
def update(self, story, params=None, **options):
    """Updates the story and returns the full record for the updated story.

    Only comment stories can have their text updated, and only comment stories and
    attachment stories can be pinned. Only one of `text` and `html_text` can be specified.

    Parameters
    ----------
    story : {Id} Globally unique identifier for the story.
    [data] : {Object} Data for the request
      - [text] : {String} The plain text with which to update the comment.
      - [html_text] : {String} The rich text with which to update the comment.
      - [is_pinned] : {Boolean} Whether the story should be pinned on the resource.
    """
    # Was a mutable default argument (`params={}`), shared across calls.
    if params is None:
        params = {}
    path = "/stories/%s" % (story,)
    return self.client.put(path, params, **options)
Only comment stories can have their text updated, and only comment stories and
attachment stories can be pinned. Only one of `text` and `html_text` can be specified.
Parameters
----------
story : {Id} Globally unique identifier for the story.
[data] : {Object} Data for the request
- [text] : {String} The plain text with which to update the comment.
- [html_text] : {String} The rich text with which to update the comment.
- [is_pinned] : {Boolean} Whether the story should be pinned on the resource. |
def set_interval(self, start, end, value, compact=False):
    """Set the value for the time series on an interval. If compact is
    True, only set the value if it's different from what it would
    be anyway.

    Only [start, end) is affected: the value the series held just
    before *end* is re-set at *end* afterwards.
    """
    # for each interval to render
    for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
        # look at all intervals included in the current interval
        # (always at least 1)
        if i == 0:
            # if the first, set initial value to new value of range
            self.set(s, value, compact)
        else:
            # otherwise, remove intermediate key
            del self[s]
    # NOTE(review): `v` leaks out of the loop; if iterperiods() ever
    # yielded nothing this would raise NameError — confirm the
    # "at least 1" invariant holds for all (start, end).
    # finish by setting the end of the interval to the previous value
self.set(end, v, compact) | Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway. |
def filesampler(files, testsetsize = 0.1, devsetsize = 0, trainsetsize = 0, outputdir = '', encoding='utf-8'):
    """Extract a training set, a test set and optionally a development set
    from one file, or from multiple *interdependent* files (such as a
    parallel corpus). It is assumed each line contains one instance (such
    as a word or sentence for example).

    Output files are written next to each input file (or under
    ``outputdir``) with ``.train``, ``.test`` and ``.dev`` suffixes.

    :param files: a filename or a list of filenames; multiple files must
        all contain the same number of lines.
    :param testsetsize: absolute line count (>= 1) or fraction (< 1) for
        the test set.
    :param devsetsize: absolute line count or fraction for the development
        set; 0 disables it.
    :param trainsetsize: if > 0, subsample the training set to this many
        lines.
    :param outputdir: directory for the output files (defaults to each
        input file's own location).
    :param encoding: text encoding used for reading and writing.
    """
    # A single filename must become a one-element list; the previous
    # `list(files)` wrongly split a string into individual characters.
    if isinstance(files, str):
        files = [files]
    elif not isinstance(files, list):
        files = list(files)

    # All files must contain the same number of lines.
    total = 0
    for filename in files:
        with io.open(filename, 'r', encoding=encoding) as f:
            count = sum(1 for _ in f)
        if total == 0:
            total = count
        elif total != count:
            raise Exception("Size mismatch, when multiple files are specified they must contain the exact same amount of lines! (" + str(count) + " vs " + str(total) + ")")

    # Support for relative (fractional) sizes:
    if testsetsize < 1:
        testsetsize = int(total * testsetsize)
    if 0 < devsetsize < 1:
        devsetsize = int(total * devsetsize)
    if testsetsize >= total or devsetsize >= total or testsetsize + devsetsize >= total:
        raise Exception("Test set and/or development set too large! No samples left for training set!")

    # Partition 1-based line numbers into train/test/dev sets.
    trainset = set(range(1, total + 1))
    # random.sample requires a sequence; sampling a set/dict view raises
    # TypeError on Python 3.11+, hence sorted(...) first.
    testset = set(random.sample(sorted(trainset), int(testsetsize)))
    trainset -= testset
    devset = set()
    if devsetsize > 0:
        devset = set(random.sample(sorted(trainset), int(devsetsize)))
        trainset -= devset
    if trainsetsize > 0:
        trainset = set(random.sample(sorted(trainset), int(trainsetsize)))

    for filename in files:
        if not outputdir:
            prefix = filename
        else:
            prefix = outputdir + '/' + os.path.basename(filename)
        with io.open(prefix + '.train', 'w', encoding=encoding) as ftrain, \
                io.open(prefix + '.test', 'w', encoding=encoding) as ftest:
            # Only open a .dev file when a development set was requested.
            fdev = io.open(prefix + '.dev', 'w', encoding=encoding) if devsetsize > 0 else None
            try:
                with io.open(filename, 'r', encoding=encoding) as f:
                    for linenum, line in enumerate(f, start=1):
                        if linenum in trainset:
                            ftrain.write(line)
                        elif linenum in testset:
                            ftest.write(line)
                        elif fdev is not None and linenum in devset:
                            fdev.write(line)
            finally:
                if fdev is not None:
                    fdev.close()
def get_changes(self, dest_attr, new_name=None, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    """Return the changes needed for this refactoring

    Builds the edits for moving ``self.method_name`` to the class held by
    the attribute ``dest_attr``, updating both the source module and the
    destination module (including the destination's imports when needed).

    Parameters:

    - `dest_attr`: the name of the destination attribute
    - `new_name`: the name of the new method; if `None` uses
      the old name
    - `resources` can be a list of `rope.base.resources.File`\\s to
      apply this refactoring on.  If `None`, the restructuring
      will be applied to all python files.
    - `task_handle`: progress/cancellation handle; defaults to a shared
      NullTaskHandle (evaluated once at definition time).
    """
    changes = ChangeSet('Moving method <%s>' % self.method_name)
    if resources is None:
        resources = self.project.get_python_files()
    if new_name is None:
        new_name = self.get_method_name()
    # Edits removing the method from its original class ...
    resource1, start1, end1, new_content1 = \
        self._get_changes_made_by_old_class(dest_attr, new_name)
    collector1 = codeanalyze.ChangeCollector(resource1.read())
    collector1.add_change(start1, end1, new_content1)
    # ... and edits adding it to the destination class.
    resource2, start2, end2, new_content2 = \
        self._get_changes_made_by_new_class(dest_attr, new_name)
    if resource1 == resource2:
        # Both classes live in the same module: one collector, one pass.
        collector1.add_change(start2, end2, new_content2)
    else:
        collector2 = codeanalyze.ChangeCollector(resource2.read())
        collector2.add_change(start2, end2, new_content2)
        result = collector2.get_changed()
        # The moved method may use imports from its original module; add
        # the ones it needs to the destination module.
        import_tools = importutils.ImportTools(self.project)
        new_imports = self._get_used_imports(import_tools)
        if new_imports:
            goal_pymodule = libutils.get_string_module(
                self.project, result, resource2)
            result = _add_imports_to_module(
                import_tools, goal_pymodule, new_imports)
        if resource2 in resources:
            changes.add_change(ChangeContents(resource2, result))
    if resource1 in resources:
        changes.add_change(ChangeContents(resource1,
                                          collector1.get_changed()))
return changes | Return the changes needed for this refactoring
Parameters:
- `dest_attr`: the name of the destination attribute
- `new_name`: the name of the new method; if `None` uses
the old name
- `resources` can be a list of `rope.base.resources.File`\s to
apply this refactoring on. If `None`, the restructuring
will be applied to all python files. |
def _set_child(self, name, child):
    """
    Validate *child* and attach it to this object under *name*.

    :param name: Child name.
    :param child: Parentable object.
    :raises ValueError: if *child* is not a Parentable instance.
    """
    if isinstance(child, Parentable):
        child._set_parent(self)
        self._store_child(name, child)
    else:
        raise ValueError('Parentable child object expected, not {child}'.format(child=child))
:param name: Child name.
:param child: Parentable object. |
def _ParseVSSProcessingOptions(self, options):
"""Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
vss_only = False
vss_stores = None
self._process_vss = not getattr(options, 'no_vss', False)
if self._process_vss:
vss_only = getattr(options, 'vss_only', False)
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
try:
self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
except ValueError:
raise errors.BadConfigOption('Unsupported VSS stores')
self._vss_only = vss_only
self._vss_stores = vss_stores | Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. |
def macro_list(self, args: argparse.Namespace) -> None:
    """List some or all macros.

    With names supplied, print the creation command for each requested
    macro (duplicates removed), reporting any that do not exist; with no
    names, print every macro in alphabetical order.
    """
    if not args.name:
        for macro_name in utils.alphabetical_sort(self.macros):
            self.poutput("macro create {} {}".format(macro_name, self.macros[macro_name].value))
        return
    for macro_name in utils.remove_duplicates(args.name):
        if macro_name in self.macros:
            self.poutput("macro create {} {}".format(macro_name, self.macros[macro_name].value))
        else:
            self.perror("Macro '{}' not found".format(macro_name), traceback_war=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.