code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def sync(self, ws_name):
    """Synchronise the workspace's repository index with the directories on disk.

    Scans the workspace's path for directories that are valid repositories,
    records each one in the workspace's ``repositories`` mapping, logs the
    resulting list, and writes the configuration back to disk.

    :param ws_name: Name of the workspace to synchronise.
    """
    workspace = self.config["workspaces"][ws_name]
    ws_path = workspace["path"]
    repositories = workspace["repositories"]
    logger = logging.getLogger(__name__)
    color = Color()
    for entry in os.listdir(ws_path):
        try:
            repo = Repository(os.path.join(ws_path, entry))
        except RepositoryError:
            # Not a repository directory; skip it silently.
            continue
        else:
            repositories[entry] = repo.path
    for repo_name in repositories:
        logger.info(color.colored(" - %s" % repo_name, "blue"))
    # Bug fix: the original had a bare expression statement here
    # (`self.config[...]["repositories"]`) that evaluated the mapping
    # without assigning it; store the updated mapping explicitly.
    self.config["workspaces"][ws_name]["repositories"] = repositories
    self.config.write()
def set_windows_env_var(key, value):
    """Set a Windows environment variable through the Win32 API.

    Raises:
        TypeError: if *key* or *value* is not text.
        WindowsError: if ``SetEnvironmentVariableW`` reports failure.
    """
    if not isinstance(key, text_type):
        raise TypeError("%r not of type %r" % (key, text_type))
    if not isinstance(value, text_type):
        raise TypeError("%r not of type %r" % (value, text_type))
    # A zero return code signals failure; surface it as a WindowsError.
    if winapi.SetEnvironmentVariableW(key, value) == 0:
        raise ctypes.WinError()
def x(self, x):
    """Project x as y.

    Axis-swapping projection for log views: unless vertical orientation is
    forced, the x value is projected through the parent's *y* projection.
    Returns None for a None input.
    """
    if x is None:
        return None
    if self._force_vertical:
        # Forced vertical: use the normal x projection from HorizontalLogView.
        return super(HorizontalLogView, self).x(x)
    # NOTE(review): deliberately calls super() relative to a *different*
    # class (XLogView) to reach its y projection — depends on the MRO of
    # the enclosing class hierarchy; confirm before refactoring.
    return super(XLogView, self).y(x)
def log_predictive_density(self, x_test, y_test, Y_metadata=None):
    """Compute the log predictive density at held-out test points.

    .. math:
        p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})

    :param x_test: test locations (x_{*})
    :type x_test: (Nx1) array
    :param y_test: test observations (y_{*})
    :type y_test: (Nx1) array
    :param Y_metadata: metadata associated with the test points
    """
    # Posterior predictive moments of the latent function at the test inputs.
    mean, variance = self._raw_predict(x_test)
    # Delegate the density computation to the likelihood object.
    return self.likelihood.log_predictive_density(
        y_test, mean, variance, Y_metadata=Y_metadata)
def locator(self, value):
    """Update the locator, recomputing latitude and longitude from it.

    Args:
        value (str): New Maidenhead locator string
    """
    self._locator = value
    coords = utils.from_grid_locator(value)
    self._latitude, self._longitude = coords
def is_depfilter_handler(class_, filter_name, filter_):
    """
    Return True when `filter_` was registered (via the :func:`depfilter`
    decorator) as a handler for the given class and filter name.
    """
    try:
        registered = get_magic_attr(filter_)
    except AttributeError:
        # Never decorated: no magic attribute present.
        return False
    return _depfilter_spec(class_, filter_name) in registered
def _BuildHttpRoutingMap(self, router_cls):
    """Builds a werkzeug routing map out of a given router class.

    Args:
        router_cls: An ApiCallRouter subclass whose annotated methods
            define the HTTP routes.

    Returns:
        A werkzeug routing.Map with one Rule per (method, path), plus a
        parallel "/api/v2/" rule for each "/api/" path.

    Raises:
        ValueError: If router_cls is not an ApiCallRouter subclass.
    """
    if not issubclass(router_cls, api_call_router.ApiCallRouter):
        raise ValueError("Router has to be an instance of ApiCallRouter.")
    routing_map = routing.Map()
    # Note: we traverse methods of the base class (ApiCallRouter) to avoid
    # potential problems caused by child router classes using the @Http
    # annotation (thus adding additional unforeseen HTTP paths/methods). We
    # don't want the HTTP API to depend on a particular router implementation.
    for _, metadata in iteritems(router_cls.GetAnnotatedMethods()):
        for http_method, path, unused_options in metadata.http_methods:
            routing_map.add(
                routing.Rule(path, methods=[http_method], endpoint=metadata))
            # This adds support for the next version of the API that uses
            # standartized JSON protobuf serialization.
            routing_map.add(
                routing.Rule(
                    path.replace("/api/", "/api/v2/"),
                    methods=[http_method],
                    endpoint=metadata))
    return routing_map
def convert_job(row: list) -> dict:
    """Convert a sacct output row into a job dict.

    :param row: one sacct row; relevant columns are id (0), name (1) and the
        trailing cpu/elapsed/start/end/state columns addressed from the end.
    :returns: dict with id, name, step, context, state, start/end datetimes
        (None while pending/cancelled/running), elapsed/cpu seconds and an
        is_completed flag.
    """
    state = row[-2]
    start_time_raw = row[-4]
    end_time_raw = row[-3]
    # Pending/cancelled jobs have no meaningful timestamps; running jobs
    # have a start but no end yet.
    if state not in ('PENDING', 'CANCELLED'):
        start_time = datetime.strptime(start_time_raw, '%Y-%m-%dT%H:%M:%S')
        if state != 'RUNNING':
            end_time = datetime.strptime(end_time_raw, '%Y-%m-%dT%H:%M:%S')
        else:
            end_time = None
    else:
        start_time = end_time = None
    # parse name of job
    job_name = row[1]
    # Bug fix: str.rstrip removes a *set of characters*, not a suffix, so
    # rstrip('_BOTH') could eat trailing B/O/T/H/_ letters from legitimate
    # names. Strip the literal suffixes instead.
    trimmed = job_name
    for suffix in ('_BOTH', '_SV'):
        if trimmed.endswith(suffix):
            trimmed = trimmed[:-len(suffix)]
    step_name, step_context = trimmed.rsplit('_', 1)
    return {
        'id': int(row[0]),
        'name': job_name,
        'step': step_name,
        'context': step_context,
        'state': state,
        'start': start_time,
        'end': end_time,
        'elapsed': time_to_sec(row[-5]),
        'cpu': time_to_sec(row[-6]),
        'is_completed': state == 'COMPLETED',
    }
def getListForEvent(self, event=None):
    ''' Get the list of names associated with a particular event.

    Combines three sources: manually-added guest-list names, staff members
    matched by this list's component rules, and (optionally) registrants.
    Returns a list of dicts with firstName/lastName/guestType keys.
    '''
    # Manually-added names; guestType is taken from notes when present.
    names = list(self.guestlistname_set.annotate(
        guestType=Case(
            When(notes__isnull=False, then=F('notes')),
            default=Value(ugettext('Manually Added')),
            output_field=models.CharField()
        )
    ).values('firstName','lastName','guestType'))
    # Component-by-component, OR append filters to an initial filter that always
    # evaluates to False.
    components = self.guestlistcomponent_set.all()
    filters = Q(pk__isnull=True)
    # Add prior staff based on the component rule.
    for component in components:
        # NOTE(review): applicability is checked on self (the list), not on
        # the individual component — confirm this is intended.
        if event and self.appliesToEvent(event):
            filters = filters | self.getComponentFilters(component,event=event)
        else:
            # No applicable event: evaluate component rules against "now".
            filters = filters | self.getComponentFilters(component,dateTime=timezone.now())
    # Add all event staff if that box is checked (no need for separate components)
    if self.includeStaff and event and self.appliesToEvent(event):
        filters = filters | Q(eventstaffmember__event=event)
    # Execute the constructed query and add the names of staff
    names += list(StaffMember.objects.filter(filters).annotate(
        guestType=Case(
            When(eventstaffmember__event=event, then=Concat(Value('Event Staff: '), 'eventstaffmember__category__name')),
            default=Value(ugettext('Other Staff')),
            output_field=models.CharField()
        )
    ).distinct().values('firstName','lastName','guestType'))
    # Optionally include everyone registered for the event.
    if self.includeRegistrants and event and self.appliesToEvent(event):
        names += list(Registration.objects.filter(eventregistration__event=event).annotate(
            guestType=Value(_('Registered'),output_field=models.CharField())
        ).values('firstName','lastName','guestType'))
    return names
def add(setname=None, entry=None, family='ipv4', **kwargs):
    '''
    Append an entry to the specified set.

    Returns 'Success' on success, a 'Warn: ...' string if the entry already
    exists, or an 'Error: ...' string describing the failure.

    CLI Example:
    .. code-block:: bash
        salt '*' ipset.add setname 192.168.1.26
        salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF
    '''
    if not setname:
        return 'Error: Set needs to be specified'
    if not entry:
        return 'Error: Entry needs to be specified'
    setinfo = _find_set_info(setname)
    if not setinfo:
        return 'Error: Set {0} does not exist'.format(setname)
    settype = setinfo['Type']
    cmd = '{0}'.format(entry)
    # Each optional kwarg is only valid if the set was created with the
    # corresponding capability (reported in the set's Header).
    if 'timeout' in kwargs:
        if 'timeout' not in setinfo['Header']:
            return 'Error: Set {0} not created with timeout support'.format(setname)
    if 'packets' in kwargs or 'bytes' in kwargs:
        if 'counters' not in setinfo['Header']:
            return 'Error: Set {0} not created with counters support'.format(setname)
    if 'comment' in kwargs:
        if 'comment' not in setinfo['Header']:
            return 'Error: Set {0} not created with comment support'.format(setname)
        # Only append the comment clause if the entry doesn't carry one already.
        if 'comment' not in entry:
            cmd = '{0} comment "{1}"'.format(cmd, kwargs['comment'])
    if set(['skbmark', 'skbprio', 'skbqueue']) & set(kwargs):
        if 'skbinfo' not in setinfo['Header']:
            return 'Error: Set {0} not created with skbinfo support'.format(setname)
    # Append any set-type-specific options recognised for this set type.
    for item in _ADD_OPTIONS[settype]:
        if item in kwargs:
            cmd = '{0} {1} {2}'.format(cmd, item, kwargs[item])
    current_members = _find_set_members(setname)
    if cmd in current_members:
        return 'Warn: Entry {0} already exists in set {1}'.format(cmd, setname)
    # Using -exist to ensure entries are updated if the comment changes
    cmd = '{0} add -exist {1} {2}'.format(_ipset_cmd(), setname, cmd)
    out = __salt__['cmd.run'](cmd, python_shell=False)
    # ipset prints nothing on success; any output is an error message.
    if not out:
        return 'Success'
    return 'Error: {0}'.format(out)
def _connect(self):
    """
    Attempts connection to the server.

    Opens the socket, waits up to 5 seconds for the server banner (which
    must start with '#'), and marks the connection established. Socket and
    banner failures are logged, the socket is closed, and a
    ConnectionError is raised.
    """
    self.logger.info("Attempting connection to %s:%s", self.server[0], self.server[1])
    try:
        self._open_socket()
        peer = self.sock.getpeername()
        self.logger.info("Connected to %s", str(peer))
        # 5 second timeout to receive server banner
        self.sock.setblocking(1)
        self.sock.settimeout(5)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        banner = self.sock.recv(512)
        if is_py3:
            # recv returns bytes on Python 3; decode for the '#' check.
            banner = banner.decode('latin-1')
        if banner[0] == "#":
            self.logger.debug("Banner: %s", banner.rstrip())
        else:
            raise ConnectionError("invalid banner from server")
    except ConnectionError as e:
        self.logger.error(str(e))
        self.close()
        raise
    except (socket.error, socket.timeout) as e:
        self.close()
        self.logger.error("Socket error: %s" % str(e))
        # Distinguish a banner timeout from other socket failures.
        if str(e) == "timed out":
            raise ConnectionError("no banner from server")
        else:
            raise ConnectionError(e)
    # Only reached when the banner was valid.
    self._connected = True
def cli_tempurl(context, method, path, seconds=None, use_container=False):
    """
    Generates a TempURL and sends that to the context.io_manager's
    stdout.
    See :py:mod:`swiftly.cli.tempurl` for context usage information.
    See :py:class:`CLITempURL` for more information.
    :param context: The :py:class:`swiftly.cli.context.CLIContext` to
        use.
    :param method: The method for the TempURL (GET, PUT, etc.)
    :param path: The path the TempURL should direct to.
    :param seconds: The number of seconds the TempURL should be good
        for. Default: 3600
    :param use_container: If True, will create a container level TempURL
        using X-Container-Meta-Temp-Url-Key instead of
        X-Account-Meta-Temp-Url-Key.
    :raises ReturnCode: on an invalid path, a failed HEAD request, or a
        missing Temp-Url-Key header.
    """
    # NOTE(review): contextlib.nested exists only on Python 2; this module
    # appears to target Python 2 — confirm before porting.
    with contextlib.nested(
            context.io_manager.with_stdout(),
            context.client_manager.with_client()) as (fp, client):
        method = method.upper()
        path = path.lstrip('/')
        seconds = seconds if seconds is not None else 3600
        if '/' not in path:
            raise ReturnCode(
                'invalid tempurl path %r; should have a / within it' % path)
        # The signing key lives either on the container or the account.
        if use_container:
            key_type = 'container'
            container = path.split('/', 1)[0]
            status, reason, headers, contents = \
                client.head_container(container)
        else:
            key_type = 'account'
            status, reason, headers, contents = \
                client.head_account()
        if status // 100 != 2:
            raise ReturnCode(
                'obtaining X-%s-Meta-Temp-Url-Key: %s %s' %
                (key_type.title(), status, reason))
        key = headers.get('x-%s-meta-temp-url-key' % key_type)
        if not key:
            raise ReturnCode(
                'there is no X-%s-Meta-Temp-Url-Key set for this %s' %
                (key_type.title(), key_type))
        url = client.storage_url + '/' + path
        fp.write(generate_temp_url(method, url, seconds, key))
        fp.write('\n')
        fp.flush()
def batch_accumulate(max_batch_size, a_generator, cooperator=None):
    """
    Start a Deferred whose callback receives the accumulated values yielded
    by a_generator, which is consumed in batches of max_batch_size so the
    reactor stays responsive.

    :param max_batch_size: The number of iterations of the generator
        to consume at a time.
    :param a_generator: An iterator which yields some not None values.
    :param cooperator: Optional twisted cooperator whose ``cooperate`` is
        used instead of the module-level one.
    :return: A Deferred fired with the yielded contents of the generator.
    """
    own_cooperate = cooperator.cooperate if cooperator else cooperate
    # The spigot captures every value as the stream is drained.
    spigot = ValueBucket()
    tapped = stream_tap((spigot,), a_generator)
    deferred = own_cooperate(i_batch(max_batch_size, tapped)).whenDone()
    deferred.addCallback(accumulation_handler, spigot)
    return deferred
def _set_dense_defaults_and_eval(kwargs):
"""
Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval
Parameters
-----------
kwargs : dict
Dictionary of dense specific keyword args
Returns
-------
: dict
Default, evaluated dictionary
"""
kwargs['delimiter'] = kwargs.get('delimiter', ',')
kwargs['na_values'] = kwargs.get('na_values', '')
kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
kwargs['drop_na'] = kwargs.get('drop_na', False)
kwargs['label_col'] = kwargs.get('label_col', 'label')
kwargs['count_col'] = kwargs.get('count_col', 'count')
for key, val in kwargs.iteritems():
try:
kwargs[key] = eval(val)
except:
kwargs[key] = val
return kwargs | Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval
Parameters
-----------
kwargs : dict
Dictionary of dense specific keyword args
Returns
-------
: dict
Default, evaluated dictionary |
def submit(self, coro, callback=None):
    """Submit a coro as NewTask to self.loop without loop.frequncy control.
    ::
        from torequests.dummy import Loop
        import asyncio
        loop = Loop()
        async def test(i):
            result = await asyncio.sleep(1)
            return (loop.frequency, i)
        coro = test(0)
        task = loop.submit(coro)
        print(task)
        # loop.x can be ignore
        loop.x
        print(task.x)
        # <NewTask pending coro=<test() running at torequests/temp_code.py:58>>
        # (Frequency(sem=<0/0>, interval=0, name=loop_sem), 0)
    """
    # Fall back to the loop's default callback when none is supplied.
    cb = callback or self.default_callback
    if not self.async_running:
        # Loop not running asynchronously: wrap the coro directly.
        return NewTask(coro, loop=self.loop, callback=cb)
    # Otherwise hand the coro to the already-running loop thread-safely.
    return self.run_coroutine_threadsafe(coro, callback=cb)
def list_all_store_credit_transactions(cls, **kwargs):
    """List StoreCreditTransactions

    Return a list of StoreCreditTransactions.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.list_all_store_credit_transactions(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[StoreCreditTransaction]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP response down to its data payload.
    kwargs['_return_http_data_only'] = True
    # Both branches of the original returned the underlying call's result
    # unchanged: a request thread when async, the data otherwise.
    return cls._list_all_store_credit_transactions_with_http_info(**kwargs)
def _surpress_formatting_errors(fn):
"""
I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need.
"""
@wraps(fn)
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ValueError:
return ""
return inner | I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need. |
def _parse_guild_info(self, info_container):
    """
    Parses the guild's general information and applies the found values.

    Sets ``description``, ``world``, ``founded`` and ``active`` on self
    when the founded-regex matches; otherwise leaves them untouched.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    m = founded_regex.search(info_container.text)
    if m:
        description = m.group("desc").strip()
        # Empty descriptions are normalised to None.
        self.description = description if description else None
        self.world = m.group("world")
        # \xa0 is a non-breaking space that would break date parsing.
        self.founded = parse_tibia_date(m.group("date").replace("\xa0", " "))
        self.active = "currently active" in m.group("status")
def accept(self, *args):
    '''Consume and return the next token if it has the correct type.

    Multiple token types (as strings, e.g. 'integer64') can be given
    as arguments. If the next token is one of them, consume and return it.
    If there is no next token or its type doesn't match, return None.
    '''
    token = self.peek()
    if token is None:
        return None
    # Membership test over the accepted types replaces the explicit loop.
    if token.type in args:
        self.position += 1
        return token
    return None
def simulate(args):
    """
    %prog simulate idsfile
    Simulate random FASTA file based on idsfile, which is a two-column
    tab-separated file with sequence name and size.
    """
    p = OptionParser(simulate.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        # Wrong argument count: print usage and exit non-zero.
        sys.exit(not p.print_help())
    idsfile, = args
    fp = open(idsfile)
    fw = must_open(opts.outfile, "w")
    # One simulated sequence per (name, size) row of the ids file.
    for row in fp:
        name, size = row.split()
        size = int(size)
        simulate_one(fw, name, size)
    fp.close()
    # NOTE(review): fw is never closed/flushed here — presumably must_open
    # may return stdout, making close() undesirable; confirm.
def _scale_fig_size(figsize, textsize, rows=1, cols=1):
    """Scale figure properties according to rows and cols.
    Parameters
    ----------
    figsize : float or None
        Size of figure in inches
    textsize : float or None
        fontsize
    rows : int
        Number of rows
    cols : int
        Number of columns
    Returns
    -------
    figsize : float or None
        Size of figure in inches
    ax_labelsize : int
        fontsize for axes label
    titlesize : int
        fontsize for title
    xt_labelsize : int
        fontsize for axes ticks
    linewidth : int
        linewidth
    markersize : int
        markersize
    """
    params = mpl.rcParams
    rc_width, rc_height = tuple(params["figure.figsize"])
    rc_ax_labelsize = params["axes.labelsize"]
    rc_titlesize = params["axes.titlesize"]
    rc_xt_labelsize = params["xtick.labelsize"]
    rc_linewidth = params["lines.linewidth"]
    rc_markersize = params["lines.markersize"]
    # rcParams font sizes may be named strings (e.g. "large"); fall back to
    # fixed numeric defaults so the arithmetic below works.
    if isinstance(rc_ax_labelsize, str):
        rc_ax_labelsize = 15
    if isinstance(rc_titlesize, str):
        rc_titlesize = 16
    if isinstance(rc_xt_labelsize, str):
        rc_xt_labelsize = 14
    if figsize is None:
        width, height = rc_width, rc_height
        # Multi-panel figures get a small extra scale factor per panel.
        sff = 1 if (rows == cols == 1) else 1.15
        width = width * cols * sff
        height = height * rows * sff
    else:
        width, height = figsize
    # Font scale: explicit textsize wins; otherwise scale single-panel
    # figures by relative area, and leave multi-panel fonts untouched.
    if textsize is not None:
        scale_factor = textsize / rc_xt_labelsize
    elif rows == cols == 1:
        scale_factor = ((width * height) / (rc_width * rc_height)) ** 0.5
    else:
        scale_factor = 1
    ax_labelsize = rc_ax_labelsize * scale_factor
    titlesize = rc_titlesize * scale_factor
    xt_labelsize = rc_xt_labelsize * scale_factor
    linewidth = rc_linewidth * scale_factor
    markersize = rc_markersize * scale_factor
    return (width, height), ax_labelsize, titlesize, xt_labelsize, linewidth, markersize
def cluster(dset, min_distance, min_cluster_size, prefix=None):
    '''clusters given ``dset`` connecting voxels ``min_distance``mm away with minimum cluster size of ``min_cluster_size``
    default prefix is ``dset`` suffixed with ``_clust%d``'''
    # Idiom fix: identity comparison against None, not equality.
    if prefix is None:
        prefix = nl.suffix(dset, '_clust%d' % min_cluster_size)
    # Delegate to whichever clustering backend is available.
    return available_method('cluster')(dset, min_distance, min_cluster_size, prefix)
def conv_lstm_2d(inputs, state, output_channels,
                 kernel_size=5, name=None, spatial_dims=None):
    """2D Convolutional LSTM.

    Args:
        inputs: input tensor; its first axis is batch, last axis channels.
        state: previous ConvLSTM state, or None to start from zeros.
        output_channels: number of output channels of the cell.
        kernel_size: square convolution kernel size (default 5).
        name: optional variable-scope name for the cell.
        spatial_dims: optional explicit spatial shape; when None the shape
            is taken from the input tensor.
    Returns:
        (outputs, new_state) tuple from one cell step.
    """
    input_shape = common_layers.shape_list(inputs)
    batch_size, input_channels = input_shape[0], input_shape[-1]
    # The cell needs the per-sample shape: spatial dims plus channels.
    if spatial_dims is None:
        input_shape = input_shape[1:]
    else:
        input_shape = spatial_dims + [input_channels]
    cell = tf.contrib.rnn.ConvLSTMCell(
        2, input_shape, output_channels,
        [kernel_size, kernel_size], name=name)
    if state is None:
        state = cell.zero_state(batch_size, tf.float32)
    outputs, new_state = cell(inputs, state)
    return outputs, new_state
def show_tables():
    """
    Return a mapping of table name to its CREATE statement for every table
    currently in the database.
    """
    _State.connection()
    _State.reflect_metadata()
    metadata = _State.metadata
    rows = select('name, sql from sqlite_master where type="table"')
    tables = {}
    for row in rows:
        tables[row['name']] = row['sql']
    return tables
def partial_transform(self, traj):
    """Featurize an MD trajectory into a vector space via calculation
    of dihedral (torsion) angles of alpha carbon backbone

    Parameters
    ----------
    traj : mdtraj.Trajectory
        A molecular dynamics trajectory to featurize.

    Returns
    -------
    features : np.ndarray, dtype=float, shape=(n_samples, n_features)
        A featurized trajectory is a 2D array of shape
        `(length_of_trajectory x n_features)` where each `features[i]`
        vector is computed by applying the featurization function
        to the `i`th snapshot of the input trajectory.
    """
    # Indices of all alpha-carbon atoms in the topology.
    ca = [a.index for a in traj.top.atoms if a.name == 'CA']
    # A dihedral needs 4 atoms; too few CAs means no features.
    if len(ca) < 4:
        return np.zeros((len(traj), 0), dtype=np.float32)
    # Consecutive CA quadruplets defining the alpha dihedral angles.
    alpha_indices = np.array(
        [(ca[i - 1], ca[i], ca[i + 1], ca[i + 2]) for i in range(1, len(ca) - 2)])
    result = md.compute_dihedrals(traj, alpha_indices)
    x = []
    # Side effect: caches the atom indices on first use.
    if self.atom_indices is None:
        self.atom_indices = np.vstack(alpha_indices)
    if self.sincos:
        # Encode each angle as (cos, sin) to avoid the periodicity seam.
        x.extend([np.cos(result), np.sin(result)])
    else:
        x.append(result)
    return np.hstack(x)
def _InitializeURL(self, upload_url, current_content_length):
    """Ensures that the URL used to upload operations is properly initialized.

    Args:
        upload_url: a string url.
        current_content_length: an integer identifying the current content length
            of data uploaded to the Batch Job.

    Returns:
        An initialized string URL, or the provided string URL if the URL has
        already been initialized.
    """
    # If initialization is not necessary, return the provided upload_url.
    if current_content_length != 0:
        return upload_url
    # x-goog-resumable: start begins a Google Cloud Storage resumable upload.
    headers = {
        'Content-Type': 'application/xml',
        'Content-Length': 0,
        'x-goog-resumable': 'start'
    }
    # Send an HTTP POST request to the given upload_url
    # NOTE(review): data={} (a dict) as the urllib2 POST body looks suspect —
    # urllib2 normally expects a string; confirm this works as intended.
    req = urllib2.Request(upload_url, data={}, headers=headers)
    resp = self._url_opener.open(req)
    # The resumable-session URL comes back in the Location header.
    return resp.headers['location']
def generate_template(template_name, **context):
    """Load *template_name* and render it with *context*.

    The helpers ``href`` and ``format_datetime`` are always injected into
    the rendering context.
    """
    context.update(href=href, format_datetime=format_datetime)
    template = template_loader.load(template_name)
    return template.generate(**context)
def _sample_action_fluent(self,
                          name: str,
                          dtype: tf.DType,
                          size: Sequence[int],
                          constraints: Dict[str, Constraints],
                          default_value: tf.Tensor,
                          prob: float) -> tf.Tensor:
    '''Samples the action fluent with given `name`, `dtype`, and `size`.

    With probability `prob` it chooses the action fluent `default_value`,
    with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.

    Args:
        name (str): The name of the action fluent.
        dtype (tf.DType): The data type of the action fluent.
        size (Sequence[int]): The size and shape of the action fluent.
        constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
        default_value (tf.Tensor): The default value for the action fluent.
        prob (float): A probability measure.

    Returns:
        tf.Tensor: A tensor for sampling the action fluent.

    Raises:
        ValueError: if real-valued bounds are tensors whose shape does not
            match `size`.
    '''
    shape = [self.batch_size] + list(size)
    if dtype == tf.float32:
        bounds = constraints.get(name)
        if bounds is None:
            # Unconstrained real fluent: sample uniformly over the full range.
            low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            sampled_fluent = dist.sample(shape)
        else:
            low, high = bounds
            # True when either bound already carries a batch dimension.
            batch = (low is not None and low.batch) or (high is not None and high.batch)
            low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
            high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            if batch:
                # Bounds are batched: the sample shape comes from the bounds.
                sampled_fluent = dist.sample()
            elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
                if (low+high).shape.as_list() == list(size):
                    sampled_fluent = dist.sample([self.batch_size])
                else:
                    raise ValueError('bounds are not compatible with action fluent.')
            else:
                sampled_fluent = dist.sample(shape)
    elif dtype == tf.int32:
        # Uniform categorical over [0, MAX_INT_VALUE).
        logits = [1.0] * self.MAX_INT_VALUE
        dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
        sampled_fluent = dist.sample(shape)
    elif dtype == tf.bool:
        # Fair coin flip per element.
        probs = 0.5
        dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
        sampled_fluent = dist.sample(shape)
    # NOTE(review): an unsupported dtype would leave sampled_fluent unbound
    # and raise UnboundLocalError below — confirm dtype is always one of
    # float32/int32/bool here.
    # Per batch element: keep the default action with probability `prob`,
    # otherwise use the freshly sampled value.
    select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)
    action_fluent = tf.where(select_default, default_value, sampled_fluent)
    return action_fluent
def add_value(self, value, row, col):
    """
    Adds a single value (cell) to a worksheet at (row, col).
    Return the (row, col) where the value has been put.
    :param value: Value to write to the sheet.
    :param row: Row where the value should be written.
    :param col: Column where the value should be written.
    :returns: The ``(row, col)`` tuple the value was stored under.
    """
    self.__values[(row, col)] = value
    # Bug fix: the docstring promises the (row, col) location, but the
    # original implicitly returned None.
    return (row, col)
def assign(pid_type, pid_value, status, object_type, object_uuid, overwrite):
    """Assign persistent identifier.

    Looks up the PID, optionally updates its status, binds it to the given
    object, commits, and echoes the resulting status.
    """
    from .models import PersistentIdentifier
    pid = PersistentIdentifier.get(pid_type, pid_value)
    if status is not None:
        pid.status = status
    pid.assign(object_type, object_uuid, overwrite=overwrite)
    db.session.commit()
    click.echo(pid.status)
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all BuildRecords for a given Project.

    Returns the formatted JSON list, or None when the raw query yields
    nothing.
    """
    raw = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
    if not raw:
        return None
    return utils.format_json_list(raw)
def object_build(self, node, obj):
    """recursive method which create a partial ast from real objects
    (only function, class, and method are handled)

    :param node: the astroid node to attach discovered members to.
    :param obj: the live Python object being introspected.
    :returns: None (previously-built node when obj was already visited).
    """
    # Memoisation: avoid rebuilding (and infinite recursion on) objects
    # already processed.
    if obj in self._done:
        return self._done[obj]
    self._done[obj] = node
    for name in dir(obj):
        try:
            member = getattr(obj, name)
        except AttributeError:
            # damned ExtensionClass.Base, I know you're there !
            attach_dummy_node(node, name)
            continue
        if inspect.ismethod(member):
            # Unwrap bound methods to their underlying function.
            member = member.__func__
        if inspect.isfunction(member):
            _build_from_function(node, name, member, self._module)
        elif inspect.isbuiltin(member):
            if not _io_discrepancy(member) and self.imported_member(
                node, member, name
            ):
                continue
            object_build_methoddescriptor(node, member, name)
        elif inspect.isclass(member):
            if self.imported_member(node, member, name):
                continue
            if member in self._done:
                # Already built: just link it into this node's locals.
                class_node = self._done[member]
                if class_node not in node.locals.get(name, ()):
                    node.add_local_node(class_node, name)
            else:
                class_node = object_build_class(node, member, name)
                # recursion
                self.object_build(class_node, member)
            # Root orphaned __class__ nodes at the module node.
            if name == "__class__" and class_node.parent is None:
                class_node.parent = self._done[self._module]
        elif inspect.ismethoddescriptor(member):
            assert isinstance(member, object)
            object_build_methoddescriptor(node, member, name)
        elif inspect.isdatadescriptor(member):
            assert isinstance(member, object)
            object_build_datadescriptor(node, member, name)
        elif isinstance(member, _CONSTANTS):
            attach_const_node(node, name, member)
        elif inspect.isroutine(member):
            # This should be called for Jython, where some builtin
            # methods aren't caught by isbuiltin branch.
            _build_from_function(node, name, member, self._module)
        else:
            # create an empty node so that the name is actually defined
            attach_dummy_node(node, name, member)
    return None
def wrap_cell(entity, json_obj, mapping, table_view=False):
    '''
    Cell wrappers
    for customizing the GUI data table

    Renders one cell value for the given entity definition; missing values
    are shown as an em-dash. Returns a <td> in table view, a <span> when a
    CSS class applies, or the bare string otherwise.

    TODO : must coincide with hierarchy!
    TODO : simplify this!
    '''
    html_class = '' # for GUI javascript
    out = ''
    #if 'cell_wrapper' in entity: # TODO : this bound type was defined by apps only
    #    out = entity['cell_wrapper'](json_obj)
    #else:
    if entity['multiple']:
        # Multi-valued entity: join the mapped names with commas.
        out = ", ".join( map(lambda x: num2name(x, entity, mapping), json_obj.get(entity['source'], [])) )
    elif entity['is_chem_formula']:
        out = html_formula(json_obj[ entity['source'] ]) if entity['source'] in json_obj else '&mdash;' if False else '—'
    elif entity['source'] == 'bandgap':
        html_class = ' class=_g'
        out = json_obj.get('bandgap')
        if out is None: out = '—'
    # dynamic determination below:
    elif entity['source'] == 'energy':
        html_class = ' class=_e'
        out = "%6.5f" % json_obj['energy'] if json_obj['energy'] else '—'
    elif entity['source'] == 'dims':
        # Dimensions only make sense for 2D/3D-periodic structures.
        out = "%4.2f" % json_obj['dims'] if json_obj['periodicity'] in [2, 3] else '—'
    else:
        out = num2name(json_obj.get(entity['source']), entity, mapping) or '—'
    if table_view:
        return '<td rel=' + str(entity['cid']) + html_class + '>' + str(out) + '</td>'
    elif html_class:
        return '<span' + html_class + '>' + str(out) + '</span>'
    return str(out)
def collection(self, collection_name):
    """
    implements Requirement 15 (/req/core/sfc-md-op)

    @type collection_name: string
    @param collection_name: name of collection

    @returns: feature collection metadata
    """
    url = self._build_url('collections/{}'.format(collection_name))
    LOGGER.debug('Request: {}'.format(url))
    # Decode the JSON body straight away; callers receive a plain dict.
    return requests.get(url, headers=REQUEST_HEADERS).json()
@type collection_name: string
@param collection_name: name of collection
@returns: feature collection metadata |
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
    """
    Performs a get_slice for column_parent and predicate for the given keys in parallel.

    Returns a Deferred that fires with the result once the response for
    this request id arrives.

    Parameters:
     - keys
     - column_parent
     - predicate
     - consistency_level
    """
    # Allocate a fresh request id and register the Deferred *before*
    # sending, so the response handler can always find it by seqid.
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
    return d
Parameters:
- keys
- column_parent
- predicate
- consistency_level |
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.

        Fix: the previous ``cond and a or b`` idiom silently fell through to
        the indefinite article whenever definite_article() returned a falsy
        value (e.g. an empty string); a real conditional avoids that.
    """
    if function == DEFINITE:
        return definite_article(word, gender, role)
    return indefinite_article(word, gender, role)
def tag(self, text):
    """Retrieves list of events in the text.

    Parameters
    ----------
    text: Text
        The text to search for events.

    Returns
    -------
    list of events sorted by start, end
        Returned directly when ``self.return_layer`` is set; otherwise the
        events are stored on ``text[self.layer_name]`` and None is returned.

    Raises
    ------
    ValueError
        If ``self.search_method`` is neither 'ahocorasick' nor 'naive'.
    """
    if self.search_method == 'ahocorasick':
        events = self._find_events_ahocorasick(text.text)
    elif self.search_method == 'naive':
        events = self._find_events_naive(text.text)
    else:
        # Previously an unknown method crashed later with an unrelated
        # NameError on ``events``; fail early with a clear message instead.
        raise ValueError('unknown search_method: {!r}'.format(self.search_method))
    events = self._resolve_conflicts(events)
    self._event_intervals(events, text)
    if self.return_layer:
        return events
    else:
        text[self.layer_name] = events
Parameters
----------
text: Text
The text to search for events.
Returns
-------
list of events sorted by start, end |
def produce_frequency_explorer(corpus,
                               category,
                               category_name=None,
                               not_category_name=None,
                               term_ranker=termranking.AbsoluteFrequencyRanker,
                               alpha=0.01,
                               use_term_significance=False,
                               term_scorer=None,
                               not_categories=None,
                               grey_threshold=1.96,
                               y_axis_values=None,
                               frequency_transform=lambda x: scale(np.log(x) - np.log(1)),
                               **kwargs):
    '''
    Produces a Monroe et al. style visualization, with the x-axis being the log frequency

    Parameters
    ----------
    corpus : Corpus
        Corpus to use.
    category : str
        Name of category column as it appears in original data frame.
    category_name : str or None
        Name of category to use.  E.g., "5-star reviews."
        Defaults to category
    not_category_name : str or None
        Name of everything that isn't in category.  E.g., "Below 5-star reviews".
        Defaults to "Not " + category_name
    term_ranker : TermRanker
        TermRanker class for determining term frequency ranks.
    alpha : float, default = 0.01
        Uniform dirichlet prior for p-value calculation
    use_term_significance : bool, False by default
        Use term scorer
    term_scorer : TermSignificance
        Subclass of TermSignificance to use as for scores and significance
    not_categories : list
        All categories other than category by default.  Documents labeled
        with remaining category.
    grey_threshold : float
        Score to grey points. Default is 1.96
    y_axis_values : list
        Custom y-axis values. Defaults to linspace
    frequency_transform : lambda, default lambda x: scale(np.log(x) - np.log(1))
        Takes a vector of frequencies and returns their x-axis scale.

    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if not_categories is None:
        not_categories = [c for c in corpus.get_categories() if c != category]
    if term_scorer is None:
        term_scorer = LogOddsRatioUninformativeDirichletPrior(alpha)

    my_term_ranker = term_ranker(corpus)
    if kwargs.get('use_non_text_features', False):
        my_term_ranker.use_non_text_features()
    # +1 smoothing keeps the log() below finite for zero-count terms.
    term_freq_df = my_term_ranker.get_ranks() + 1
    freqs = term_freq_df[[c + ' freq' for c in [category] + not_categories]].sum(axis=1).values
    # Pick up to 5 round tick values, log-spaced over the frequency range.
    x_axis_values = [round_downer(10 ** x) for x
                     in np.linspace(0, np.log(freqs.max()) / np.log(10), 5)]
    x_axis_values = [x for x in x_axis_values if x > 1 and x <= freqs.max()]
    # y_axis_values = [-2.58, -1.96, 0, 1.96, 2.58]
    frequencies_log_scaled = frequency_transform(freqs)  # scale(np.log(freqs) - np.log(1))

    if 'scores' not in kwargs:
        kwargs['scores'] = get_term_scorer_scores(category,
                                                  corpus,
                                                  kwargs.get('neutral_categories', False),
                                                  not_categories,
                                                  kwargs.get('show_neutral', False),
                                                  term_ranker,
                                                  term_scorer,
                                                  kwargs.get('use_non_text_features', False))

    def y_axis_rescale(coords):
        # Recenter around 0.5 and normalize into [0, 1] for charting.
        return ((coords - 0.5) / (np.abs(coords - 0.5).max()) + 1) / 2

    # from https://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python
    def round_to_1(x):
        if x == 0:
            return 0
        return round(x, -int(np.floor(np.log10(abs(x)))))

    if y_axis_values is None:
        max_score = np.floor(np.max(kwargs['scores']) * 100) / 100
        min_score = np.ceil(np.min(kwargs['scores']) * 100) / 100
        if min_score < 0 and max_score > 0:
            central = 0
        else:
            central = 0.5
        y_axis_values = [x for x in [min_score, central, max_score]
                         if x >= min_score and x <= max_score]
    scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(kwargs['scores'])
    if use_term_significance:
        kwargs['term_significance'] = term_scorer

    kwargs['y_label'] = kwargs.get('y_label', term_scorer.get_name())

    # Grey out points whose score is within +/- grey_threshold of zero.
    kwargs['color_func'] = kwargs.get('color_func', '''(function(d) {
	return (Math.abs(d.os) < %s)
	 ? d3.interpolate(d3.rgb(230, 230, 230), d3.rgb(130, 130, 130))(Math.abs(d.os)/%s)
	 : d3.interpolateRdYlBu(d.y);
})''' % (grey_threshold, grey_threshold))

    return produce_scattertext_explorer(corpus,
                                        category=category,
                                        category_name=category_name,
                                        not_category_name=not_category_name,
                                        x_coords=frequencies_log_scaled,
                                        y_coords=scores_scaled_for_charting,
                                        original_x=freqs,
                                        original_y=kwargs['scores'],
                                        x_axis_values=x_axis_values,
                                        y_axis_values=y_axis_values,
                                        rescale_x=scale,
                                        rescale_y=y_axis_rescale,
                                        sort_by_dist=False,
                                        term_ranker=term_ranker,
                                        not_categories=not_categories,
                                        x_label=kwargs.get('x_label', 'Log Frequency'),
                                        **kwargs)
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str or None
Name of category to use. E.g., "5-star reviews."
Defaults to category
not_category_name : str or None
Name of everything that isn't in category. E.g., "Below 5-star reviews".
Defaults to "Not " + category_name
term_ranker : TermRanker
TermRanker class for determining term frequency ranks.
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
use_term_significance : bool, False by default
Use term scorer
term_scorer : TermSignificance
Subclass of TermSignificance to use as for scores and significance
not_categories : list
All categories other than category by default. Documents labeled
with remaining category.
grey_threshold : float
Score to grey points. Default is 1.96
y_axis_values : list
Custom y-axis values. Defaults to linspace
frequency_transform : lambda, default lambda x: scale(np.log(x) - np.log(1))
Takes a vector of frequencies and returns their x-axis scale.
Remaining arguments are from `produce_scattertext_explorer`.'
Returns
-------
str, html of visualization |
def get_region (self, rs,cs, re,ce):
    '''This returns a list of lines representing the region.

    The region is bounded by rows rs..re and columns cs..ce (1-based,
    inclusive). Coordinates are clamped to the screen dimensions and
    swapped if given in reverse order.
    '''
    rs = constrain (rs, 1, self.rows)
    re = constrain (re, 1, self.rows)
    cs = constrain (cs, 1, self.cols)
    ce = constrain (ce, 1, self.cols)
    if rs > re:
        rs, re = re, rs
    if cs > ce:
        cs, ce = ce, cs
    sc = []
    for r in range (rs, re+1):
        # join is linear; the previous per-character ``line + ch``
        # concatenation was quadratic in the region width.
        line = u''.join(self.get_abs(r, c) for c in range(cs, ce + 1))
        sc.append (line)
    return sc
def key_value_pairs(self):
    """
    Flatten self.ip_data (a header row followed by data rows) into
    [row_id, column_header, cell] triples stored in self.op_data.

    For input
        r1c1,r1c2,r1c3
        r2c1,r2c2,r2c3
    each cell becomes one triple whose row_id is the first cell of its
    row, giving every value a stable identifier so the dataset can be
    transposed and manipulated later.
    """
    self.op_data = []
    headers = self.ip_data[0]
    for record in self.ip_data[1:]:
        row_id = record[0]
        self.op_data.extend([row_id, headers[idx], cell]
                            for idx, cell in enumerate(record))
This should also create unique id's to allow for any
dataset to be transposed, and then later manipulated
r1c1,r1c2,r1c3
r2c1,r2c2,r2c3
should be converted to
ID COLNUM VAL
r1c1, |
def to_automaton_(f, labels:Set[Symbol]=None):
    """
    DEPRECATED

    From a LDLfFormula, build the automaton.
    :param f: a LDLfFormula;
    :param labels: a set of Symbol, the fluents of our domain. If None, retrieve them from the formula;
    :param determinize: True if you need to determinize the NFA, obtaining a DFA;
    :param minimize: True if you need to minimize the DFA (if determinize is False this flag has no effect.)
    :return: a NFA or a DFA which accepts the same traces that makes the formula True.
    """
    nnf = f.to_nnf()

    if labels is None:
        # if the labels of the formula are not specified in input,
        # retrieve them from the formula
        labels = nnf.find_labels()

    # the alphabet is the powerset of the set of fluents
    alphabet = powerset(labels)
    initial_state = MacroState({nnf})
    final_states = {MacroState()}
    delta = set()

    d = f.delta(PLFalseInterpretation(), epsilon=True)
    # NOTE(review): ``d.truth(d)`` evaluates the delta-formula against itself
    # as the interpretation; the epsilon-acceptance check further below uses
    # ``truth(PLFalseInterpretation())`` instead -- confirm this asymmetry is
    # intentional.
    if d.truth(d):
        final_states.add(initial_state)

    states = {MacroState(), initial_state}

    # Fixpoint loop: keep expanding states and transitions until neither
    # the state set nor the transition relation changes.
    states_changed, delta_changed = True, True
    while states_changed or delta_changed:
        states_changed, delta_changed = False, False

        for actions_set in alphabet:
            states_list = list(states)
            for q in states_list:

                # delta function applied to every formula in the macro state Q
                delta_formulas = [f.delta(actions_set) for f in q]

                # find the list of atoms, which are "true" atoms (i.e. propositional atoms) or LDLf formulas
                atomics = [s for subf in delta_formulas for s in find_atomics(subf)]

                # "freeze" the found atoms as symbols and build a mapping from symbols to formulas
                symbol2formula = {Symbol(str(f)): f for f in atomics if f != PLTrue() and f != PLFalse()}

                # build a map from formula to a "freezed" propositional Atomic Formula
                formula2atomic_formulas = {
                    f: PLAtomic(Symbol(str(f)))
                    if f != PLTrue() and f != PLFalse()# and not isinstance(f, PLAtomic)
                    else f for f in atomics
                }

                # the final list of Propositional Atomic Formulas, one for each formula in the original macro state Q
                transformed_delta_formulas = [_transform_delta(f, formula2atomic_formulas) for f in delta_formulas]

                # the empty conjunction stands for true
                if len(transformed_delta_formulas) == 0:
                    conjunctions = PLTrue()
                elif len(transformed_delta_formulas) == 1:
                    conjunctions = transformed_delta_formulas[0]
                else:
                    conjunctions = PLAnd(transformed_delta_formulas)

                # the model in this case is the smallest set of symbols s.t. the conjunction of "freezed" atomic formula
                # is true.
                models = frozenset(conjunctions.minimal_models(Alphabet(symbol2formula)))
                if len(models) == 0:
                    continue

                for min_model in models:
                    # "thaw" the satisfying symbols back into formulas: the
                    # successor macro state.
                    q_prime = MacroState(
                        {symbol2formula[s] for s in min_model.true_propositions})

                    len_before = len(states)
                    states.add(q_prime)
                    if len(states) == len_before + 1:
                        states_list.append(q_prime)
                        states_changed = True

                    len_before = len(delta)
                    delta.add((q, actions_set, q_prime))
                    if len(delta) == len_before + 1:
                        delta_changed = True

                    # check if q_prime should be added as final state
                    if len(q_prime) == 0:
                        final_states.add(q_prime)
                    else:
                        subf_deltas = [subf.delta(PLFalseInterpretation(), epsilon=True) for subf in q_prime]
                        if len(subf_deltas)==1:
                            q_prime_delta_conjunction = subf_deltas[0]
                        else:
                            q_prime_delta_conjunction = PLAnd(subf_deltas)

                        if q_prime_delta_conjunction.truth(PLFalseInterpretation()):
                            final_states.add(q_prime)

    # Repackage the raw action-sets as interpretations for pythomata.
    alphabet = PythomataAlphabet({PLInterpretation(set(sym)) for sym in alphabet})
    delta = frozenset((i, PLInterpretation(set(a)), o) for i, a, o in delta)

    nfa = NFA.fromTransitions(
        alphabet=alphabet,
        states=frozenset(states),
        initial_state=initial_state,
        accepting_states=frozenset(final_states),
        transitions=delta
    )

    return nfa
From a LDLfFormula, build the automaton.
:param f: a LDLfFormula;
:param labels: a set of Symbol, the fluents of our domain. If None, retrieve them from the formula;
:param determinize: True if you need to determinize the NFA, obtaining a DFA;
:param minimize: True if you need to minimize the DFA (if determinize is False this flag has no effect.)
:return: a NFA or a DFA which accepts the same traces that makes the formula True. |
def Compile(self, filter_implementation):
    """Compile the binary expression into a filter object."""
    op = self.operator.lower()
    # Dispatch table instead of an if/elif chain; both spellings of each
    # operator map to the same filter factory name.
    factory_by_op = {'and': 'AndFilter', '&&': 'AndFilter',
                     'or': 'OrFilter', '||': 'OrFilter'}
    factory_name = factory_by_op.get(op)
    if factory_name is None:
        raise errors.ParseError(
            'Invalid binary operator {0:s}'.format(op))
    compiled_args = [arg.Compile(filter_implementation) for arg in self.args]
    return getattr(filter_implementation, factory_name)(*compiled_args)
def _read(self):
    """Open the file and return its contents as unicode."""
    with open(self.path, 'r') as handle:
        raw = handle.read()
    # Py27's INI config parser chokes if the content provided is not
    # unicode; all other versions accept either, so coercing here is safe.
    return compat.unicode(raw)
def record_variant_id(record):
    """Get variant ID from pyvcf.model._Record.

    Falls back to "CHROM:POS" when the record carries no explicit ID.
    """
    return record.ID or record.CHROM + ':' + str(record.POS)
def list(self, table, **kparams):
    """
    get a collection of records by table name.
    returns a dict (the json map) for python 3.4
    """
    # Fetch raw rows for the table, then convert them into records.
    return self.to_records(self.table_api_get(table, **kparams), table)
returns a dict (the json map) for python 3.4 |
def build_expressions(verb):
    """
    Build expressions for helper verbs

    Parameters
    ----------
    verb : verb
        A verb with a *functions* attribute.

    Returns
    -------
    out : tuple
        (List of Expressions, New columns). The expressions and the
        new columns in which the results of those expressions will
        be stored. Even when a result will stored in a column with
        an existing label, that column is still considered new,
        i.e An expression ``x='x+1'``, will create a new_column `x`
        to replace an old column `x`.
    """
    def partial(func, col, *args, **kwargs):
        """
        Make a function that acts on a column in a dataframe

        Parameters
        ----------
        func : callable
            Function
        col : str
            Column
        args : tuple
            Arguments to pass to func
        kwargs : dict
            Keyword arguments to func

        Returns
        -------
        new_func : callable
            Function that takes a dataframe, and calls the
            original function on a column in the dataframe.
        """
        def new_func(gdf):
            return func(gdf[col], *args, **kwargs)
        return new_func

    def make_statement(func, col):
        """
        A statement of function called on a column in a dataframe

        Parameters
        ----------
        func : str or callable
            Function to call on a dataframe column
        col : str
            Column
        """
        if isinstance(func, str):
            # String functions become textual expressions, e.g. "mean(x)".
            expr = '{}({})'.format(func, col)
        elif callable(func):
            # Callables are closed over the column and the verb's arguments.
            expr = partial(func, col, *verb.args, **verb.kwargs)
        else:
            raise TypeError("{} is not a function".format(func))
        return expr

    def func_name(func):
        """
        Return name of a function.

        If the function is `np.sin`, we return `sin`.
        """
        if isinstance(func, str):
            return func
        try:
            return func.__name__
        except AttributeError:
            return ''

    # Generate function names. They act as identifiers (postfixed
    # to the original columns) in the new_column names.
    if isinstance(verb.functions, (tuple, list)):
        names = (func_name(func) for func in verb.functions)
        names_and_functions = zip(names, verb.functions)
    else:
        names_and_functions = verb.functions.items()

    # Create statements for the expressions
    # and postfix identifiers
    columns = Selector.get(verb)  # columns to act on
    postfixes = []
    stmts = []
    for name, func in names_and_functions:
        postfixes.append(name)
        for col in columns:
            stmts.append(make_statement(func, col))

    if not stmts:
        stmts = columns

    # Names of the new columns
    # e.g col1_mean, col2_mean, col1_std, col2_std
    add_postfix = (isinstance(verb.functions, dict) or
                   len(verb.functions) > 1)
    if add_postfix:
        fmt = '{}_{}'.format
        new_columns = [fmt(c, p) for p in postfixes for c in columns]
    else:
        new_columns = columns

    expressions = [Expression(stmt, col)
                   for stmt, col in zip(stmts, new_columns)]
    return expressions, new_columns
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will stored in a column with
an existing label, that column is still considered new,
i.e An expression ``x='x+1'``, will create a new_column `x`
to replace an old column `x`. |
def Ainv(self):
    'Returns a Solver instance'
    # Lazily build and cache the factorized solver on first access.
    cached = getattr(self, '_Ainv', None)
    if cached is not None:
        return cached
    self._Ainv = self.Solver(self.A, 13)
    self._Ainv.run_pardiso(12)
    return self._Ainv
def invite(self, username):
    """Invite the user to join this team.

    This returns a dictionary like so::

        {'state': 'pending', 'url': 'https://api.github.com/teams/...'}

    :param str username: (required), user to invite to join this team.
    :returns: dictionary
    """
    membership_url = self._build_url('memberships', username, base_url=self._api)
    response = self._put(membership_url)
    return self._json(response, 200)
This returns a dictionary like so::
{'state': 'pending', 'url': 'https://api.github.com/teams/...'}
:param str username: (required), user to invite to join this team.
:returns: dictionary |
def get_all_instances(sql, class_type, *args, **kwargs):
    """Returns a list of instances of class_type populated with attributes from the DB record

    @param sql: Sql statement to execute
    @param class_type: The type of class to instantiate and populate with DB record
    @return: Return a list of instances with attributes set to values from DB
    """
    rows = CoyoteDb.get_all_records(sql, *args, **kwargs)
    result = []
    for row in rows:
        instance = CoyoteDb.get_object_from_dictionary_representation(
            dictionary=row, class_type=class_type)
        # Remember which query produced this instance (debug aid).
        instance._query = sql
        result.append(instance)
    return result
@param sql: Sql statement to execute
@param class_type: The type of class to instantiate and populate with DB record
@return: Return a list of instances with attributes set to values from DB |
def pack_rgb(rgb):
    '''Packs 24-bit RGB triples into single integers; accepts either a
    single (r, g, b) sequence or an array whose last axis has length 3.'''
    original_shape = None

    if isinstance(rgb, np.ndarray):
        assert rgb.shape[-1] == 3
        # Remember the leading shape so the packed result can be restored.
        original_shape = rgb.shape[:-1]
    else:
        assert len(rgb) == 3
        rgb = np.array(rgb)

    flat = rgb.astype(int).reshape((-1, 3))
    packed = (flat[:, 0] << 16) | (flat[:, 1] << 8) | flat[:, 2]

    return packed if original_shape is None else packed.reshape(original_shape)
works on both arrays and tuples. |
def make_format(format_spec):
    """Build format string from a format specification.

    :param format_spec: Format specification (as FormatSpec object).
    :return: Composed format (as string).
    """
    # -- FORMAT-SPEC: [[fill]align][0][width][.precision][type]
    fill = format_spec.fill[0] if format_spec.fill else ''
    align = format_spec.align[0] if format_spec.align else ''
    zero = '0' if format_spec.zero else ''
    precision_part = '.%s' % format_spec.precision if format_spec.precision else ''
    return '%s%s%s%s%s%s' % (fill, align, zero, format_spec.width,
                             precision_part, format_spec.type)
:param format_spec: Format specification (as FormatSpec object).
:return: Composed format (as string). |
def url(self):
    """
    CDN URL for the instance, usable to retrieve, delete, and overwrite
    the file. If security is enabled, signature and policy parameters
    will be included.

    *returns* [String]

    ```python
    filelink = client.upload(filepath='/path/to/file')
    filelink.url
    # https://cdn.filestackcontent.com/FILE_HANDLE
    ```
    """
    return get_url(
        CDN_URL,
        handle=self.handle,
        security=self.security,
    )
to retrieve, delete, and overwrite the file. If security is enabled, signature and policy parameters will
be included,
*returns* [String]
```python
filelink = client.upload(filepath='/path/to/file')
filelink.url
# https://cdn.filestackcontent.com/FILE_HANDLE
``` |
def _handle_invalid_read_response(self, res, expected_len):
    """
    This function is called when we do not get the expected frame header in
    response to a command. Probable reason is that we are not talking to a
    YubiHSM in HSM mode (might be a modem, or a YubiHSM in configuration mode).

    Throws a hopefully helpful exception.
    """
    # NOTE(review): expected_len is unused in this body -- confirm callers
    # rely on the signature only.
    if not res:
        # Nothing came back at all: reset the device and report a timeout.
        reset(self.stick)
        raise pyhsm.exception.YHSM_Error('YubiHSM did not respond to command %s' \
                                             % (pyhsm.defines.cmd2str(self.command)) )
    # try to check if it is a YubiHSM in configuration mode
    self.stick.write('\r\r\r', '(mode test)')
    res2 = self.stick.read(50) # expect a timeout
    lines = res2.split('\n')
    for this in lines:
        # Configuration-mode prompts look like "NO_CFG> ", "WSAPI> ", "HSM> ".
        if re.match('^(NO_CFG|WSAPI|HSM).*> .*', this):
            raise pyhsm.exception.YHSM_Error('YubiHSM is in configuration mode')
    raise pyhsm.exception.YHSM_Error('Unknown response from serial device %s : "%s"' \
                                         % (self.stick.device, res.encode('hex')))
response to a command. Probable reason is that we are not talking to a
YubiHSM in HSM mode (might be a modem, or a YubiHSM in configuration mode).
Throws a hopefully helpful exception. |
def confirm_build(build_url, keeper_token):
    """Confirm a build upload is complete.

    Wraps ``PATCH /builds/{build}``.

    Parameters
    ----------
    build_url : `str`
        URL of the build resource. Given a build resource, this URL is
        available from the ``self_url`` field.
    keeper_token : `str`
        Auth token (`ltdconveyor.keeper.get_keeper_token`).

    Raises
    ------
    ltdconveyor.keeper.KeeperError
        Raised if there is an error communicating with the LTD Keeper API.
    """
    response = requests.patch(
        build_url,
        auth=(keeper_token, ''),
        json={'uploaded': True})
    if response.status_code != 200:
        raise KeeperError(response)
Wraps ``PATCH /builds/{build}``.
Parameters
----------
build_url : `str`
URL of the build resource. Given a build resource, this URL is
available from the ``self_url`` field.
keeper_token : `str`
Auth token (`ltdconveyor.keeper.get_keeper_token`).
Raises
------
ltdconveyor.keeper.KeeperError
Raised if there is an error communicating with the LTD Keeper API. |
def open_stored_file(value, url):
    """
    Deserialize value for a given upload url and return open file.
    Returns None if deserialization fails.
    """
    info = deserialize_upload(value, url)
    stored_name = info['name']
    storage_cls = info['storage']
    # Guard clauses: missing storage class, missing name, or a file that
    # no longer exists in storage all yield None.
    if not storage_cls or not stored_name:
        return None
    storage = storage_cls()
    if not storage.exists(stored_name):
        return None
    opened = storage.open(stored_name)
    opened.name = os.path.basename(stored_name)
    return opened
Returns None if deserialization fails. |
def add(self, method, pattern, callback):
    """Add a route.

    Arguments:
      method (str): HTTP method, e.g. GET, POST, etc.
      pattern (str): Pattern that request paths must match.
      callback (str): Route handler that is invoked when a request
        path matches the *pattern*.
    """
    kind, normalized = self._normalize_pattern(pattern)
    # Route into the table matching the normalized pattern's kind;
    # anything that is neither literal nor wildcard is treated as regex.
    if kind == 'literal':
        self._literal[method][normalized] = callback
    elif kind == 'wildcard':
        self._wildcard[method].append(WildcardRoute(normalized, callback))
    else:
        self._regex[method].append(RegexRoute(normalized, callback))
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*. |
def _process_out_of_bounds(self, value, start, end):
    "Clips out of bounds values"
    # Normalize value/start/end into comparable datetime objects when the
    # value is a numpy datetime64. Numeric start/end appear to be treated
    # as timestamps (via convert_timestamp) -- TODO confirm with callers.
    if isinstance(value, np.datetime64):
        v = dt64_to_dt(value)
        if isinstance(start, (int, float)):
            start = convert_timestamp(start)
        if isinstance(end, (int, float)):
            end = convert_timestamp(end)
        s, e = start, end
        if isinstance(s, np.datetime64):
            s = dt64_to_dt(s)
        if isinstance(e, np.datetime64):
            e = dt64_to_dt(e)
    else:
        v, s, e = value, start, end
    # Clamp: comparisons use the converted v/s/e, but the returned bound is
    # the (possibly converted) start/end object, not the raw input value.
    if v < s:
        value = start
    elif v > e:
        value = end
    return value
def core_periphery_dir(W, gamma=1, C0=None, seed=None):
    '''
    The optimal core/periphery subdivision is a partition of the network
    into two nonoverlapping groups of nodes, a core group and a periphery
    group. The number of core-group edges is maximized, and the number of
    within periphery edges is minimized.

    The core-ness is a statistic which quantifies the goodness of the
    optimal core/periphery subdivision (with arbitrary relative value).

    The algorithm uses a variation of the Kernighan-Lin graph partitioning
    algorithm to optimize a core-structure objective described in
    Borgatti & Everett (2000) Soc Networks 21:375-395

    See Rubinov, Ypma et al. (2015) PNAS 112:10032-7

    Parameters
    ----------
    W : NxN np.ndarray
        directed connection matrix (NOTE: its diagonal is zeroed in place)
    gamma : core-ness resolution parameter
        Default value = 1
        gamma > 1 detects small core, large periphery
        0 < gamma < 1 detects large core, small periphery
    C0 : NxN np.ndarray
        Initial core structure
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    C : length-N np.ndarray
        binary vector of the optimal core assignment (1 = core, 0 = periphery)
    q : float
        core-ness statistic of the returned assignment
    '''
    rng = get_rng(seed)
    n = len(W)
    np.fill_diagonal(W, 0)

    # BUG FIX: the original tested ``C0 == None``, which performs an
    # elementwise comparison when C0 is an ndarray and raises "truth value
    # of an array is ambiguous"; identity comparison is the correct test.
    if C0 is None:
        C = rng.randint(2, size=(n,))
    else:
        C = C0.copy()

    #methodological note, the core-detection null model is not corrected
    #for degree cf community detection (to enable detection of hubs)

    s = np.sum(W)
    p = np.mean(W)
    b = W - gamma * p
    B = (b + b.T) / (2 * s)
    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])

    #sqish

    flag = True
    it = 0
    while flag:
        it += 1
        if it > 100:
            raise BCTParamError('Infinite Loop aborted')

        flag = False
        #initial node indices
        ixes = np.arange(n)

        Ct = C.copy()
        while len(ixes) > 0:
            Qt = np.zeros((n,))
            ctix, = np.where(Ct)
            nctix, = np.where(np.logical_not(Ct))
            q0 = (np.sum(B[np.ix_(ctix, ctix)]) -
                  np.sum(B[np.ix_(nctix, nctix)]))
            Qt[ctix] = q0 - 2 * np.sum(B[ctix, :], axis=1)
            Qt[nctix] = q0 + 2 * np.sum(B[nctix, :], axis=1)

            max_Qt = np.max(Qt[ixes])
            u, = np.where(np.abs(Qt[ixes]-max_Qt) < 1e-10)
            #tunourn
            u = u[rng.randint(len(u))]
            Ct[ixes[u]] = np.logical_not(Ct[ixes[u]])
            #casga

            ixes = np.delete(ixes, u)

            if max_Qt - q > 1e-10:
                flag = True
                C = Ct.copy()
                cix, = np.where(C)
                ncix, = np.where(np.logical_not(C))
                q = (np.sum(B[np.ix_(cix, cix)]) -
                     np.sum(B[np.ix_(ncix, ncix)]))

    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
    return C, q
into two nonoverlapping groups of nodes, a core group and a periphery
group. The number of core-group edges is maximized, and the number of
within periphery edges is minimized.
The core-ness is a statistic which quantifies the goodness of the
optimal core/periphery subdivision (with arbitrary relative value).
The algorithm uses a variation of the Kernighan-Lin graph partitioning
algorithm to optimize a core-structure objective described in
Borgatti & Everett (2000) Soc Networks 21:375-395
See Rubinov, Ypma et al. (2015) PNAS 112:10032-7
Parameters
----------
W : NxN np.ndarray
directed connection matrix
gamma : core-ness resolution parameter
Default value = 1
gamma > 1 detects small core, large periphery
0 < gamma < 1 detects large core, small periphery
C0 : NxN np.ndarray
Initial core structure
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value. |
def copyidfobject(self, idfobject):
    """Add an IDF object to the IDF.

    Parameters
    ----------
    idfobject : EpBunch object
        The IDF object to add. This usually comes from another idf file,
        or it can be used to copy within this idf file.

    Returns
    -------
    The value returned by ``addthisbunch`` for the copied object.
    """
    return addthisbunch(self.idfobjects,
                        self.model,
                        self.idd_info,
                        idfobject, self)
Parameters
----------
idfobject : EpBunch object
The IDF object to add. This usually comes from another idf file,
or it can be used to copy within this idf file. |
def matrix_multiply(m1, m2):
    """ Matrix multiplication (iterative algorithm).

    The running time of the iterative matrix multiplication algorithm is :math:`O(n^{3})`.

    :param m1: 1st matrix with dimensions :math:`(n \\times p)`
    :type m1: list, tuple
    :param m2: 2nd matrix with dimensions :math:`(p \\times m)`
    :type m2: list, tuple
    :return: resultant matrix with dimensions :math:`(n \\times m)`
    :rtype: list
    """
    num_inner = len(m2)
    num_cols = len(m2[0])
    # Each result cell is the dot product of a row of m1 with a column of
    # m2, accumulated in float just like the original triple loop.
    return [[sum((float(row[k] * m2[k][j]) for k in range(num_inner)), 0.0)
             for j in range(num_cols)]
            for row in m1]
The running time of the iterative matrix multiplication algorithm is :math:`O(n^{3})`.
:param m1: 1st matrix with dimensions :math:`(n \\times p)`
:type m1: list, tuple
:param m2: 2nd matrix with dimensions :math:`(p \\times m)`
:type m2: list, tuple
:return: resultant matrix with dimensions :math:`(n \\times m)`
:rtype: list |
def download(self, torrent_id, directory, filename):
    """ Download a torrent """
    endpoint = 'torrents/download/%s' % torrent_id
    payload = {'filename': filename, 'directory': directory}
    return self.call(endpoint, params=payload)
def read_file(self, filename):
    """
    Read a text file and provide feedback to the user.

    :param filename: The pathname of the file to read (a string).
    :returns: The contents of the file (a string).
    """
    logger.info("Reading file: %s", format_path(filename))
    contents = self.context.read_file(filename)
    line_count = len(contents.splitlines())
    # Lazy %-style logging args: only formatted if debug is enabled.
    logger.debug("Read %s from %s.",
                 pluralize(line_count, 'line'),
                 format_path(filename))
    return contents.rstrip()
:param filename: The pathname of the file to read (a string).
:returns: The contents of the file (a string). |
def main():
    """Command-line entry point for the tetrad analysis: parse arguments,
    build or reload the Tetrad object, connect an ipyparallel cluster,
    and run the inference."""

    ## parse params file input (returns to stdout if --help or --version)
    args = parse_command_line()
    print(HEADER.format(ip.__version__))

    ## set random seed
    np.random.seed(args.rseed)

    ## debugger----------------------------------------
    if os.path.exists(ip.__debugflag__):
        os.remove(ip.__debugflag__)
    if args.debug:
        print("\n ** Enabling debug mode ** ")
        ip._debug_on()

    ## if JSON, load existing Tetrad analysis -----------------------
    if args.json:
        data = ipa.tetrad(name=args.name, workdir=args.workdir, load=True)
        ## if force then remove all results
        if args.force:
            data._refresh()

    ## else create a new tmp assembly for the seqarray-----------------
    else:
        ## create new Tetrad class Object if it doesn't exist
        newjson = os.path.join(args.workdir, args.name+'.tet.json')
        ## if not quiet...
        print("tetrad instance: {}".format(args.name))

        if (not os.path.exists(newjson)) or args.force:
            ## purge any files associated with this name if forced
            if args.force:
                ## init an object in the correct location just to refresh
                ipa.tetrad(name=args.name,
                           workdir=args.workdir,
                           data=args.seq,
                           initarr=False,
                           save_invariants=args.invariants,
                           cli=True,
                           quiet=True)._refresh()

            ## create new tetrad object
            data = ipa.tetrad(name=args.name,
                              workdir=args.workdir,
                              method=args.method,
                              data=args.seq,
                              resolve=args.resolve,
                              mapfile=args.map,
                              guidetree=args.tree,
                              nboots=args.boots,
                              nquartets=args.nquartets,
                              cli=True,
                              save_invariants=args.invariants,
                              )
        else:
            raise SystemExit(QUARTET_EXISTS\
                .format(args.name, args.workdir, args.workdir, args.name, args.name))

    ## boots can be set either for a new object or loaded JSON to continue it
    if args.boots:
        data.params.nboots = int(args.boots)

    ## if ipyclient is running (and matched profile) then use that one
    if args.ipcluster:
        ipyclient = ipp.Client(profile=args.ipcluster)
        data._ipcluster["cores"] = len(ipyclient)

    ## if not then we need to register and launch an ipcluster instance
    else:
        ## set CLI ipcluster terms
        ipyclient = None
        data._ipcluster["cores"] = args.cores if args.cores else detect_cpus()
        data._ipcluster["engines"] = "Local"
        if args.MPI:
            data._ipcluster["engines"] = "MPI"
            if not args.cores:
                raise IPyradWarningExit("must provide -c argument with --MPI")
        ## register to have a cluster-id with "ip- name"
        data = register_ipcluster(data)

    ## message about whether we are continuing from existing
    if data.checkpoint.boots:
        print(LOADING_MESSAGE.format(
            data.name, data.params.method, data.checkpoint.boots))

    ## run tetrad main function within a wrapper. The wrapper creates an
    ## ipyclient view and appends to the list of arguments to run 'run'.
    data.run(force=args.force, ipyclient=ipyclient)
def answer(request):
    """
    Save the answer.
    GET parameters:
    html:
    turn on the HTML version of the API
    BODY
    json in following format:
    {
    "answer": #answer, -- for one answer
    "answers": [#answer, #answer, #answer ...] -- for multiple answers
    }
    answer = {
    "answer_class": str, -- class of answer to save (e.g., flashcard_answer)
    "response_time": int, -- response time in milliseconds
    "meta": "str" -- optional information
    "time_gap": int -- waiting time in frontend in seconds
    ... -- other fields depending on answer type
    (see from_json method of Django model class)
    }
    """
    if request.method == 'GET':
        # GET serves the self-documenting HTML form of this endpoint.
        return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
    elif request.method == 'POST':
        practice_filter = get_filter(request)
        practice_context = PracticeContext.objects.from_content(practice_filter)
        saved_answers = _save_answers(request, practice_context, True)
        return render_json(request, saved_answers, status=200, template='models_answer.html')
    else:
        # Bug fix: the original used "%s" with str.format, so the HTTP method
        # was never interpolated into the error message.
        return HttpResponseBadRequest("method {} is not allowed".format(request.method))
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on answer type
(see from_json method of Django model class)
} |
def grating(period,
            number_of_teeth,
            fill_frac,
            width,
            position,
            direction,
            lda=1,
            sin_theta=0,
            focus_distance=-1,
            focus_width=-1,
            evaluations=99,
            layer=0,
            datatype=0):
    '''
    Straight or focusing grating.
    period : grating period
    number_of_teeth : number of teeth in the grating
    fill_frac : filling fraction of the teeth (w.r.t. the period)
    width : width of the grating
    position : grating position (feed point)
    direction : one of {'+x', '-x', '+y', '-y'}
    lda : free-space wavelength
    sin_theta : sine of incidence angle
    focus_distance : focus distance (negative for straight grating)
    focus_width : if non-negative, the focusing area is included in
    the result (usually for negative resists) and this
    is the width of the waveguide connecting to the
    grating
    evaluations : number of evaluations of `path.parametric`
    layer : GDSII layer number
    datatype : GDSII datatype number
    Return `PolygonSet`
    '''
    # Straight grating: a uniform comb of rectangular teeth drawn with L1Path.
    if focus_distance < 0:
        path = gdspy.L1Path(
            (position[0] - 0.5 * width,
             position[1] + 0.5 * (number_of_teeth - 1 + fill_frac) * period),
            '+x',
            period * fill_frac, [width], [],
            number_of_teeth,
            period,
            layer=layer,
            datatype=datatype)
    else:
        # Focusing grating: each tooth is drawn as a parametric curve.
        # neff is the effective index implied by the grating equation
        # (lda / period = neff - sin_theta) — NOTE(review): presumably; confirm.
        neff = lda / float(period) + sin_theta
        # Index of the first tooth, the one nearest the requested focus distance.
        qmin = int(focus_distance / float(period) + 0.5)
        path = gdspy.Path(period * fill_frac, position)
        max_points = 199 if focus_width < 0 else 2 * evaluations
        c3 = neff**2 - sin_theta**2
        w = 0.5 * width
        for q in range(qmin, qmin + number_of_teeth):
            c1 = q * lda * sin_theta
            c2 = (q * lda)**2
            # The lambda captures this iteration's c1/c2 and is consumed
            # immediately by `parametric`, so there is no late-binding issue.
            path.parametric(
                lambda t: (width * t - w, (c1 + neff * numpy.sqrt(c2 - c3 * (
                    width * t - w)**2)) / c3),
                number_of_evaluations=evaluations,
                max_points=max_points,
                layer=layer,
                datatype=datatype)
            # Reset the path cursor to the feed point before drawing the next tooth.
            path.x = position[0]
            path.y = position[1]
        if focus_width >= 0:
            # Close the first tooth back to the feed point (or to the ends of a
            # waveguide of width `focus_width`) so the focusing area is filled.
            path.polygons[0] = numpy.vstack(
                (path.polygons[0][:evaluations, :],
                 ([position] if focus_width == 0 else
                  [(position[0] + 0.5 * focus_width, position[1]),
                   (position[0] - 0.5 * focus_width, position[1])])))
            path.fracture()
    # The grating is constructed for the '+y' direction and rotated into place.
    if direction == '-x':
        return path.rotate(0.5 * numpy.pi, position)
    elif direction == '+x':
        return path.rotate(-0.5 * numpy.pi, position)
    elif direction == '-y':
        return path.rotate(numpy.pi, position)
    else:
        return path | Straight or focusing grating.
period : grating period
number_of_teeth : number of teeth in the grating
fill_frac : filling fraction of the teeth (w.r.t. the period)
width : width of the grating
position : grating position (feed point)
direction : one of {'+x', '-x', '+y', '-y'}
lda : free-space wavelength
sin_theta : sine of incidence angle
focus_distance : focus distance (negative for straight grating)
focus_width : if non-negative, the focusing area is included in
the result (usually for negative resists) and this
is the width of the waveguide connecting to the
grating
evaluations : number of evaluations of `path.parametric`
layer : GDSII layer number
datatype : GDSII datatype number
Return `PolygonSet` |
def from_value(self, instance, value):
    """
    Parse *value* with this descriptor's ``type_`` and store the result on
    *instance*.
    Returns True when the value was stored.  Returns False when parsing
    failed and ``erroneous_as_absent`` is set; otherwise the parsing error
    propagates.
    """
    try:
        converted = self.type_.parse(value)
    except (TypeError, ValueError):
        if not self.erroneous_as_absent:
            raise
        return False
    self._set_from_recv(instance, converted)
    return True
`instance`’ attribute. |
def notify_event(self, session_info, topic):
    """
    Publish *session_info* on the event bus under *topic*.
    :raises AttributeError: with a descriptive message when the event bus
        cannot publish the message.
    """
    try:
        self.event_bus.sendMessage(topic, items=session_info)
    except AttributeError:
        raise AttributeError("Could not publish {} event".format(topic))
def get_resource_listing(url, offset, limit, properties):
    """Generic method to retrieve a resource listing from a SCO-API. Takes the
    resource-specific API listing Url as argument.
    Parameters
    ----------
    url : string
        Resource listing Url for a SCO-API
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        List of additional object properties to be included for items in
        the result
    Returns
    -------
    List(ResourceHandle)
        List of resource handle (one per subject in the object listing)
    """
    # Create listing query based on given arguments.
    query = [
        QPARA_OFFSET + '=' + str(offset),
        QPARA_LIMIT + '=' + str(limit)
    ]
    # Add properties argument only when the property list is non-empty
    # (original behaviour: None and [] both skip the query parameter).
    if properties:
        query.append(QPARA_ATTRIBUTES + '=' + ','.join(properties))
    # Decorate the listing Url with the query arguments and fetch the listing.
    url = url + '?' + '&'.join(query)
    json_obj = JsonResource(url).json
    # Convert the result into a list of resource handles.
    resources = []
    for element in json_obj['items']:
        resource = ResourceHandle(element)
        # Attach requested properties (an empty dict when properties == []).
        if properties is not None:
            resource.properties = {
                prop: element[prop] for prop in properties if prop in element
            }
        resources.append(resource)
    return resources
resource-specific API listing Url as argument.
Parameters
----------
url : string
Resource listing Url for a SCO-API
offset : int, optional
Starting offset for returned list items
limit : int, optional
Limit the number of items in the result
properties : List(string)
List of additional object properties to be included for items in
the result
Returns
-------
List(ResourceHandle)
List of resource handle (one per subject in the object listing) |
def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen):
    """return the app key that will open this mime type"""
    return self.function_table.getDefaultApplicationForMimeType(
        pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen)
def point_cloud(df, columns=(0, 1, 2)):
    """3-D Point cloud for plotting things like mesh models of horses ;)
    Parameters:
        df: DataFrame (or anything convertible to one) holding the points.
        columns: the three columns to use as x, y, z; falls back to the
            first three columns of df when any is missing.
    Returns the Axes3D object the points were drawn on.
    """
    # Tuple default avoids the shared-mutable-default pitfall of the
    # original `columns=[0, 1, 2]`.
    df = df if isinstance(df, pd.DataFrame) else pd.DataFrame(df)
    columns = list(columns)
    if not all(c in df.columns for c in columns):
        columns = list(df.columns)[:3]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # noqa
    # Bug fix: scatter on the axes *instance*; the original called the
    # unbound Axes3D.scatter with no self argument, which raises.
    ax.scatter(*[df[columns[i]] for i in range(3)], zdir='z', s=20, c=None, depthshade=True)
    return ax
def sheet2matrixidx(self, x, y):
    """
    Map a sheet-coordinate point (x, y) to the integer (row, column) of the
    matrix cell containing it, given this object's bounds and density.
    Points on the right or bottom boundary map to an index just outside the
    matrix, because those boundaries are exclusive.
    Works for scalar as well as array x and y.
    """
    row, col = self.sheet2matrix(x, y)
    row, col = np.floor(row), np.floor(col)
    # Arrays expose .astype; plain scalars are converted with int().
    if hasattr(row, 'astype'):
        return row.astype(int), col.astype(int)
    return int(row), int(col)
and column index of the matrix cell in which that point falls,
given a bounds and density. Returns (row,column).
Note that if coordinates along the right or bottom boundary
are passed into this function, the returned matrix coordinate
of the boundary will be just outside the matrix, because the
right and bottom boundaries are exclusive.
Valid for scalar or array x and y. |
def plot(self, axis=None, node_size=40, node_color='k',
         node_alpha=0.8, edge_alpha=0.5, edge_cmap='viridis_r',
         edge_linewidth=2, vary_line_width=True, colorbar=True):
    """Plot the minimum spanning tree (as projected into 2D by t-SNE if required).
    Parameters
    ----------
    axis : matplotlib axis, optional
    The axis to render the plot to
    node_size : int, optional
    The size of nodes in the plot (default 40).
    node_color : matplotlib color spec, optional
    The color to render nodes (default black).
    node_alpha : float, optional
    The alpha value (between 0 and 1) to render nodes with
    (default 0.8).
    edge_cmap : matplotlib colormap, optional
    The colormap to color edges by (varying color by edge
    weight/distance). Can be a cmap object or a string
    recognised by matplotlib. (default `viridis_r`)
    edge_alpha : float, optional
    The alpha value (between 0 and 1) to render edges with
    (default 0.5).
    edge_linewidth : float, optional
    The linewidth to use for rendering edges (default 2).
    vary_line_width : bool, optional
    Edge width is proportional to (log of) the inverse of the
    mutual reachability distance. (default True)
    colorbar : bool, optional
    Whether to draw a colorbar. (default True)
    Returns
    -------
    axis : matplotlib axis
    The axis used the render the plot.
    """
    # Import lazily so the class is usable without matplotlib installed.
    try:
        import matplotlib.pyplot as plt
        from matplotlib.collections import LineCollection
    except ImportError:
        raise ImportError('You must install the matplotlib library to plot the minimum spanning tree.')
    # NOTE(review): the 32767 cap presumably guards renderer limits /
    # performance for huge scatter plots — confirm before changing.
    if self._data.shape[0] > 32767:
        warn('Too many data points for safe rendering of an minimal spanning tree!')
        return None
    if axis is None:
        axis = plt.gca()
    if self._data.shape[1] > 2:
        # Get a 2D projection; if we have a lot of dimensions use PCA first
        if self._data.shape[1] > 32:
            # Use PCA to get down to 32 dimension
            data_for_projection = PCA(n_components=32).fit_transform(self._data)
        else:
            data_for_projection = self._data.copy()
        projection = TSNE().fit_transform(data_for_projection)
    else:
        projection = self._data.copy()
    # Wider lines for shorter (more strongly connected) edges, on a log scale.
    if vary_line_width:
        line_width = edge_linewidth * (np.log(self._mst.T[2].max() / self._mst.T[2]) + 1.0)
    else:
        line_width = edge_linewidth
    # Columns 0 and 1 of the MST array hold endpoint indices; column 2 holds
    # the edge weight (mutual reachability distance).
    line_coords = projection[self._mst[:, :2].astype(int)]
    line_collection = LineCollection(line_coords, linewidth=line_width,
                                     cmap=edge_cmap, alpha=edge_alpha)
    # Color edges by their weight.
    line_collection.set_array(self._mst[:, 2].T)
    axis.add_artist(line_collection)
    axis.scatter(projection.T[0], projection.T[1], c=node_color, alpha=node_alpha, s=node_size)
    axis.set_xticks([])
    axis.set_yticks([])
    if colorbar:
        cb = plt.colorbar(line_collection)
        cb.ax.set_ylabel('Mutual reachability distance')
    return axis | Plot the minimum spanning tree (as projected into 2D by t-SNE if required).
Parameters
----------
axis : matplotlib axis, optional
The axis to render the plot to
node_size : int, optional
The size of nodes in the plot (default 40).
node_color : matplotlib color spec, optional
The color to render nodes (default black).
node_alpha : float, optional
The alpha value (between 0 and 1) to render nodes with
(default 0.8).
edge_cmap : matplotlib colormap, optional
The colormap to color edges by (varying color by edge
weight/distance). Can be a cmap object or a string
recognised by matplotlib. (default `viridis_r`)
edge_alpha : float, optional
The alpha value (between 0 and 1) to render edges with
(default 0.5).
edge_linewidth : float, optional
The linewidth to use for rendering edges (default 2).
vary_line_width : bool, optional
Edge width is proportional to (log of) the inverse of the
mutual reachability distance. (default True)
colorbar : bool, optional
Whether to draw a colorbar. (default True)
Returns
-------
axis : matplotlib axis
The axis used the render the plot. |
def readBoolean(self):
    """
    Read C{Boolean}.
    Consumes a single byte from the stream: C{'\\x01'} maps to C{True},
    C{'\\x00'} to C{False}.
    @raise ValueError: Error reading Boolean.
    @rtype: C{bool}
    """
    octet = self.stream.read(1)
    if octet == '\x01':
        return True
    if octet == '\x00':
        return False
    raise ValueError("Error reading boolean")
@raise ValueError: Error reading Boolean.
@rtype: C{bool}
@return: A Boolean value, C{True} if the byte
is nonzero, C{False} otherwise. |
def available(self, **kwargs):
    """
    Find available dedicated numbers to buy.
    Returns a dictionary of the form::
        {
            "numbers": ["12146124143", "12172100315", ...],
            "price": 2.4
        }
    :Example:
        numbers = client.numbers.available(country="US")
    :param str country: Dedicated number country. Required.
    :param str prefix: Desired number prefix. Should include country code (i.e. 447 for GB)
    """
    listing_uri = "%s/%s" % (self.uri, "available")
    response, instance = self.request("GET", listing_uri, params=kwargs)
    return instance
::
{
"numbers": [
"12146124143",
"12172100315",
"12172100317",
"12172100319",
"12172100321",
"12172100323",
"12172100325",
"12172100326",
"12172100327",
"12172100328"
],
"price": 2.4
}
:Example:
numbers = client.numbers.available(country="US")
:param str country: Dedicated number country. Required.
:param str prefix: Desired number prefix. Should include country code (i.e. 447 for GB) |
def parse():
    """Parse the command-line arguments for the BabelFy entity tagger and
    return them as a dict."""
    parser = argparse.ArgumentParser(
        description='BabelFy Entity Tagger',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # The input text can come either inline (-t) or from a file (-tf).
    source = parser.add_mutually_exclusive_group()
    source.add_argument('-t', '--text', metavar='',
                        help='text to be annotated by BabelFy API')
    source.add_argument('-tf', '--text-file', metavar='',
                        help='path to the file containing the input text')
    parser.add_argument('-key', '--api-key', metavar='', required=False,
                        help='BabelFy API key')
    # Boolean switches selecting which annotation data is reported.
    for short_flag, long_flag, help_text in (
            ('-e', '--entities', 'get entity data'),
            ('-ae', '--all-entities', 'get entity and non-entity data'),
            ('-m', '--merged-entities', 'get merged entities only'),
            ('-am', '--all-merged-entities', 'get all merged entities'),
            ('-p', '--print', 'dump all babelfy data to stdout'),
    ):
        parser.add_argument(short_flag, long_flag, help=help_text,
                            required=False, action='store_true')
    parser.add_argument('-ex', '--export', metavar='', required=False,
                        help='filename of the output file')
    return vars(parser.parse_args())
def tplot_restore(filename):
    """
    This function will restore tplot variables that have been saved with the "tplot_save" command.
    .. note::
    This function is compatible with the IDL tplot_save routine.
    If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
    Not all plot options will transfer over at this time.
    Parameters:
    filename : str
    The file name and full path generated by the "tplot_save" command.
    Returns:
    None
    Examples:
    >>> # Restore the saved data from the tplot_save example
    >>> import pytplot
    >>> pytplot.restore('C:/temp/variable1.pytplot')
    """
    #Error check
    if not (os.path.isfile(filename)):
        print("Not a valid file name")
        return
    #Check if the restored file was an IDL file
    if filename.endswith('.tplot'):
        # IDL save file: 'dq' holds the data quantities, 'tv' the tplot options.
        temp_tplot = readsav(filename)
        for i in range(len(temp_tplot['dq'])):
            data_name = temp_tplot['dq'][i][0].decode("utf-8")
            temp_x_data = temp_tplot['dq'][i][1][0][0]
            #Pandas reads in data the other way I guess
            if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
                temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
            else:
                temp_y_data = temp_tplot['dq'][i][1][0][2]
            #If there are more than 4 fields, that means it is a spectrogram
            if len(temp_tplot['dq'][i][1][0]) > 4:
                temp_v_data = temp_tplot['dq'][i][1][0][4]
                #Change from little endian to big endian, since pandas apparently hates little endian
                #We might want to move this into the store_data procedure eventually
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                if (temp_v_data.dtype.byteorder == '>'):
                    temp_v_data = temp_v_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
            else:
                #Change from little endian to big endian, since pandas apparently hates little endian
                #We might want to move this into the store_data procedure eventually
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
            # Field 3 holds per-variable plot options; copy any named fields over.
            if temp_tplot['dq'][i][3].dtype.names is not None:
                for option_name in temp_tplot['dq'][i][3].dtype.names:
                    options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
            # data_quants is (presumably) the module-level registry of loaded
            # tplot variables; copy bookkeeping fields onto the stored entry.
            data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
            data_quants[data_name].dtype = temp_tplot['dq'][i][5]
            data_quants[data_name].create_time = temp_tplot['dq'][i][6]
        # Global (figure-wide) options follow the per-variable data.
        for option_name in temp_tplot['tv'][0][0].dtype.names:
            if option_name == 'TRANGE':
                tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'WSIZE':
                tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'VAR_LABEL':
                tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
        if 'P' in temp_tplot['tv'][0][1].tolist():
            for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
                if option_name == 'TITLE':
                    tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
        #temp_tplot['tv'][0][1] is all of the "settings" variables
        #temp_tplot['tv'][0][1]['D'][0] is "device" options
        #temp_tplot['tv'][0][1]['P'][0] is "plot" options
        #temp_tplot['tv'][0][1]['X'][0] is x axis options
        #temp_tplot['tv'][0][1]['Y'][0] is y axis options
        ####################################################################
    else:
        # Native pytplot pickle: element 0 is the count, then the variables,
        # then the global options.
        temp = pickle.load(open(filename,"rb"))
        num_data_quants = temp[0]
        for i in range(0, num_data_quants):
            data_quants[temp[i+1].name] = temp[i+1]
        # NOTE(review): this assigns a *local* name only; presumably intended
        # to update the module-level tplot options — confirm.
        tplot_opt_glob = temp[num_data_quants+1]
    return | This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot') |
def _using_stdout(self):
    """
    Return whether the handler is using sys.stdout.
    """
    stream = self.stream
    if WINDOWS and colorama:
        # Under colorama, self.stream is an AnsiToWin32 wrapper around the
        # real stream.
        stream = stream.wrapped
    return stream is sys.stdout
def open(self, mode):
    """
    Open the FileSystem target.
    This method returns a file-like object which can either be read from or written to depending
    on the specified mode.
    :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
                 open the FileSystemTarget in write mode. Subclasses can implement
                 additional options.
    :type mode: str
    :raises Exception: for any mode other than 'r' or 'w'.
    """
    if mode == 'w':
        return self.format.pipe_writer(AtomicFtpFile(self._fs, self.path))
    elif mode == 'r':
        temp_dir = os.path.join(tempfile.gettempdir(), 'luigi-contrib-ftp')
        # Bug fix: use an integer bound — random.randrange rejects float
        # arguments on modern Python (1e10 was deprecated in 3.10 and is an
        # error from 3.12).
        self.__tmp_path = temp_dir + '/' + self.path.lstrip('/') + '-luigi-tmp-%09d' % random.randrange(0, 10 ** 10)
        # download file to local
        self._fs.get(self.path, self.__tmp_path)
        return self.format.pipe_reader(
            FileWrapper(io.BufferedReader(io.FileIO(self.__tmp_path, 'r')))
        )
    else:
        raise Exception("mode must be 'r' or 'w' (got: %s)" % mode)
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options.
:type mode: str |
def get_bounds(self, bin_num):
    """Return the bounds of the bin at index `bin_num`.
    :returns: a `Bounds` namedtuple with properties min and max
        respectively.
    """
    lower = self.min_value + self.bin_size * bin_num
    return self.Bounds(lower, lower + self.bin_size)
:returns: a `Bounds` namedtuple with properties min and max
respectively. |
def compute_venn3_subsets(a, b, c):
    '''
    Given three set or Counter objects, compute the sizes of the seven venn3
    regions (a only, b only, a&b only, c only, a&c only, b&c only, a&b&c),
    as needed by the subsets parameter of venn3 and venn3_circles.
    Returns the result as a 7-tuple of sizes.
    All three arguments must be of exactly the same type; otherwise a
    ValueError is raised.
    >>> compute_venn3_subsets(set([1,2,3]), set([2,3,4]), set([3,4,5,6]))
    (1, 0, 1, 2, 0, 1, 1)
    >>> compute_venn3_subsets(Counter([1,2,3]), Counter([2,3,4]), Counter([3,4,5,6]))
    (1, 0, 1, 2, 0, 1, 1)
    '''
    if type(a) != type(b) or type(b) != type(c):
        raise ValueError("All arguments must be of the same type")
    # len() undercounts Counter multiplicities, so sum the counts instead.
    if type(a) == Counter:
        size = lambda s: sum(s.values())
    else:
        size = len
    return (size(a - (b | c)),
            size(b - (a | c)),
            size((a & b) - c),
            size(c - (a | b)),
            size((a & c) - b),
            size((b & c) - a),
            size(a & b & c))
as needed by the subsets parameter of venn3 and venn3_circles.
Returns the result as a tuple.
>>> compute_venn3_subsets(set([1,2,3]), set([2,3,4]), set([3,4,5,6]))
(1, 0, 1, 2, 0, 1, 1)
>>> compute_venn3_subsets(Counter([1,2,3]), Counter([2,3,4]), Counter([3,4,5,6]))
(1, 0, 1, 2, 0, 1, 1)
>>> compute_venn3_subsets(Counter([1,1,1]), Counter([1,1,1]), Counter([1,1,1,1]))
(0, 0, 0, 1, 0, 0, 3)
>>> compute_venn3_subsets(Counter([1,1,2,2,3,3]), Counter([2,2,3,3,4,4]), Counter([3,3,4,4,5,5,6,6]))
(2, 0, 2, 4, 0, 2, 2)
>>> compute_venn3_subsets(Counter([1,2,3]), Counter([2,2,3,3,4,4]), Counter([3,3,4,4,4,5,5,6]))
(1, 1, 1, 4, 0, 3, 1)
>>> compute_venn3_subsets(set([]), set([]), set([]))
(0, 0, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([1]), set([]), set([]))
(1, 0, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([]), set([1]), set([]))
(0, 1, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([]), set([]), set([1]))
(0, 0, 0, 1, 0, 0, 0)
>>> compute_venn3_subsets(Counter([]), Counter([]), Counter([1]))
(0, 0, 0, 1, 0, 0, 0)
>>> compute_venn3_subsets(set([1]), set([1]), set([1]))
(0, 0, 0, 0, 0, 0, 1)
>>> compute_venn3_subsets(set([1,3,5,7]), set([2,3,6,7]), set([4,5,6,7]))
(1, 1, 1, 1, 1, 1, 1)
>>> compute_venn3_subsets(Counter([1,3,5,7]), Counter([2,3,6,7]), Counter([4,5,6,7]))
(1, 1, 1, 1, 1, 1, 1)
>>> compute_venn3_subsets(Counter([1,3,5,7]), set([2,3,6,7]), set([4,5,6,7]))
Traceback (most recent call last):
...
ValueError: All arguments must be of the same type |
def off_coordinator(self, year):
    """Return the coach ID of the team's offensive coordinator in a given
    year, or None when it cannot be determined.
    :year: An int representing the year.
    :returns: A string containing the coach ID of the OC.
    """
    try:
        anchor = self._year_info_pq(year, 'Offensive Coordinator')('a')
        return anchor.attr['href'] if anchor else None
    except ValueError:
        return None
:year: An int representing the year.
:returns: A string containing the coach ID of the OC. |
def load_hdf5(path):
    """Load data from a HDF5 file.
    Args:
        path (str): A path to the HDF5 format file containing data.
    Returns:
        Data matrix X and target vector y. X is a scipy CSR matrix when the
        file was written in sparse form, otherwise a dense array.
    """
    with h5py.File(path, 'r') as f:
        # 'issparse' flags whether the matrix was stored as CSR components.
        is_sparse = f['issparse'][...]
        if is_sparse:
            # Rebuild the CSR matrix from its data/indices/indptr components.
            shape = tuple(f['shape'][...])
            data = f['data'][...]
            indices = f['indices'][...]
            indptr = f['indptr'][...]
            X = sparse.csr_matrix((data, indices, indptr), shape=shape)
        else:
            X = f['data'][...]
        y = f['target'][...]
    return X, y
Args:
path (str): A path to the HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y |
def get_id(self, name, recurse=True):
    """Return the first id matching ``name`` — either a local or a var.
    :name: TODO
    :returns: TODO
    """
    self._dlog("getting id '{}'".format(name))
    return self._search("vars", name, recurse)
or a var.
:name: TODO
:returns: TODO |
def etree_to_dict(source):
    """ Recursively convert an etree representation of an XML tree into a dict/list representation.
    Args:
        source -- An etree Element or ElementTree.
    Returns:
        A dictionary representing source's xml structure where tags with multiple identical children
        contain a list of all their children dictionaries.
    Raises:
        TypeError -- when source is neither an Element nor an ElementTree.
    >>> etree_to_dict(ET.fromstring('<content><id>12</id><title/></content>'))
    {'content': {'id': '12', 'title': None}}
    >>> etree_to_dict(ET.fromstring('<content><list><li>foo</li><li>bar</li></list></content>'))
    {'content': {'list': [{'li': 'foo'}, {'li': 'bar'}]}}
    """
    def etree_to_dict_recursive(parent):
        # Bug fix: Element.getchildren() was removed in Python 3.9;
        # list(parent) is the documented replacement.
        children = list(parent)
        if not children:
            return parent.text
        d = {}
        identical_children = False
        l = None
        for child in children:
            if not identical_children:
                if child.tag in d:
                    # Second child with a duplicate tag: switch to list form,
                    # preserving the dict's insertion order.
                    identical_children = True
                    l = [{key: d[key]} for key in d]
                    l.append({child.tag: etree_to_dict_recursive(child)})
                    d = None
                else:
                    d[child.tag] = etree_to_dict_recursive(child)
            else:
                l.append({child.tag: etree_to_dict_recursive(child)})
        return l if identical_children else d
    if hasattr(source, 'getroot'):
        source = source.getroot()
    if hasattr(source, 'tag'):
        return {source.tag: etree_to_dict_recursive(source)}
    raise TypeError("Requires an Element or an ElementTree.")
Args:
source -- An etree Element or ElementTree.
Returns:
A dictionary representing source's xml structure where tags with multiple identical children
contain list of all their children dictionaries..
>>> etree_to_dict(ET.fromstring('<content><id>12</id><title/></content>'))
{'content': {'id': '12', 'title': None}}
>>> etree_to_dict(ET.fromstring('<content><list><li>foo</li><li>bar</li></list></content>'))
{'content': {'list': [{'li': 'foo'}, {'li': 'bar'}]}} |
def _load_packets(file_h, header, layers=0):
    """
    Read packets from the capture file. Expects the file handle to point to
    the location immediately after the header (24 bytes).
    Returns the list of all packets read until `_read_a_packet` yields a
    falsy value (end of capture).
    """
    hdrp = ctypes.pointer(header)
    pkts = []
    pkt = _read_a_packet(file_h, hdrp, layers)
    while pkt:
        pkts.append(pkt)
        pkt = _read_a_packet(file_h, hdrp, layers)
    return pkts
the location immediately after the header (24 bytes). |
def run_hive(args, check_return_code=True):
    """
    Run the `hive` command line tool with the given args and return its
    decoded stdout.
    Some Hive table-existence checks (done using DESCRIBE) exit with a
    non-zero return code even on success, so ``check_return_code=False``
    skips the check and simply hands back stdout for parsing.
    """
    cmd = load_hive_cmd() + args
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if check_return_code and proc.returncode != 0:
        raise HiveCommandError(
            "Hive command: {0} failed with error code: {1}".format(" ".join(cmd), proc.returncode),
            out, err)
    return out.decode('utf-8')
returning stdout.
With the apache release of Hive, so of the table existence checks
(which are done using DESCRIBE do not exit with a return code of 0
so we need an option to ignore the return code and just return stdout for parsing |
def delNode(self, address):
    """
    Forward a node deletion to polyglot, dropping our local record first if
    present. The deletion is sent even for nodes we never tracked — usually
    used for normalization.
    """
    try:
        del self.nodes[address]
    except KeyError:
        pass
    self.poly.delNode(address)
in our config anywhere. Usually used for normalization. |
def enable_contactgroup_host_notifications(self, contactgroup):
    """Enable host notifications for every contact of a contactgroup
    Format of the line that triggers function call::
        ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>
    :param contactgroup: contactgroup to enable
    :type contactgroup: alignak.objects.contactgroup.Contactgroup
    :return: None
    """
    contacts = self.daemon.contacts
    for contact_id in contactgroup.get_contacts():
        self.enable_contact_host_notifications(contacts[contact_id])
def _remove_non_serializable_store_entries(store: Store) -> dict:
    """
    Copy all serializable items of *store* into a new dict, skipping (and
    logging) the rest.
    This makes sure the items are kept during runtime, even if the user
    edits and saves the script.
    """
    serializable = {}
    for key, value in store.items():
        if not (Script._is_serializable(key) and Script._is_serializable(value)):
            _logger.info("Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. "
                         "This item cannot be saved and therefore will be lost when autokey quits.".format(
                             key, value
                         ))
            continue
        serializable[key] = value
    return serializable
This makes sure to keep the items during runtime, even if the user edits and saves the script. |
def _start_http_session(self):
    """
    Start a new requests HTTP session, clearing cookies and session data.

    :return: None
    """
    api_logger.debug("Starting new HTTP session...")
    # Replacing the session object discards any previous cookies/state.
    self.session = FuturesSession(executor=self.executor, max_workers=self.max_workers)
    self.session.headers.update({"User-Agent": self.user_agent})
    # Guard clause: only attach basic auth when both credentials are set.
    if not (self.username and self.password):
        return
    api_logger.debug("Requests will use authorization.")
    self.session.auth = HTTPBasicAuth(self.username, self.password)
:return: None |
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_tx(self, **kwargs):
    """Auto Generated Code
    """
    # Build the NETCONF config payload for the flowcontrol tx leaf.
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(port_profile, "name").text = kwargs.pop('name')
    # Walk down the fixed element hierarchy to the flowcontrolglobal node.
    parent = port_profile
    for tag in ("qos-profile", "qos", "flowcontrol", "flowcontrolglobal"):
        parent = ET.SubElement(parent, tag)
    ET.SubElement(parent, "tx").text = kwargs.pop('tx')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_many(d, required=(), optional=(), one_of=()):
    """
    Returns a predictable number of elements out of ``d`` in a list for auto-expanding.

    Keys in ``required`` will raise KeyError if not found in ``d``.
    Keys in ``optional`` will return None if not found in ``d``.
    Keys in ``one_of`` will raise KeyError if none exist, otherwise return the first in ``d``.

    Example::

        uid, action, limit, offset = get_many(request.params,
            required=['uid', 'action'], optional=['limit', 'offset'])

    Note: This function has been added to the webhelpers package.
    """
    # Defaults are immutable tuples: mutable default arguments are shared
    # across calls and a well-known Python pitfall.
    d = d or {}
    r = [d[k] for k in required]       # KeyError propagates for missing required keys
    r += [d.get(k) for k in optional]  # missing optional keys yield None
    if one_of:
        # Return as soon as the first present key is found.
        for k in one_of:
            if k in d:
                return r + [d[k]]
        raise KeyError("Missing a one_of value.")
    return r
Keys in ``required`` will raise KeyError if not found in ``d``.
Keys in ``optional`` will return None if not found in ``d``.
Keys in ``one_of`` will raise KeyError if none exist, otherwise return the first in ``d``.
Example::
uid, action, limit, offset = get_many(request.params, required=['uid', 'action'], optional=['limit', 'offset'])
Note: This function has been added to the webhelpers package. |
def connect(self, target, acceptor, wrapper=None):
    """
    Initiate an outgoing connection from this tendril manager's
    endpoint.  Once the connection is completed, a Tendril object will
    be created and handed to the given acceptor.

    :param target: The target of the connection attempt.
    :param acceptor: A callable which will initialize the state of
                     the new Tendril object.
    :param wrapper: A callable taking, as its first argument, a
                    socket.socket object.  The callable must return a
                    valid proxy for the socket.socket object, which
                    will subsequently be used to communicate on the
                    connection.

    For passing extra arguments to the acceptor or the wrapper,
    see the ``TendrilPartial`` class; for chaining together
    multiple wrappers, see the ``WrapperChain`` class.
    """
    # Connections may only be initiated while the manager is running.
    if not self.running:
        raise ValueError("TendrilManager not running")

    # The target must belong to the address family this manager serves.
    if utils.addr_info(target) != self.addr_family:
        raise ValueError("address family mismatch")
Once the connection is completed, a Tendril object will be
created and passed to the given acceptor.
:param target: The target of the connection attempt.
:param acceptor: A callable which will initialize the state of
the new Tendril object.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
For passing extra arguments to the acceptor or the wrapper,
see the ``TendrilPartial`` class; for chaining together
multiple wrappers, see the ``WrapperChain`` class. |
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
    """
    Compare two aligned sequences (subject and query) codon by codon.

    ``sbjct_start`` is the 1-based position on the subject where the
    alignment starts.  Mismatching codons are recorded in
    ``mis_matches``.  When a gap is found, ``get_inframe_gap`` is used
    to extract the indel sequence while keeping the comparison in the
    correct reading frame, and ``name_indel_mutation`` is used to name
    the indel mutation and translate it to amino acids.

    Returns a list of entries, each of the form::

        [mut_type, codon_no, seq_pos, indel_or_aa, mut_name,
         sbjct_part, qry_part, aa_ref, aa_alt]

    containing all the information needed to look each mutation up in
    the database of known mutations and to write the output files for
    the user.  The list is sorted by codon position.
    """
    mis_matches = []

    # Find start pos of first codon in frame, i_start
    codon_offset = (sbjct_start-1) % 3
    i_start = 0
    if codon_offset != 0:
        i_start = 3 - codon_offset
    sbjct_start = sbjct_start + i_start

    # Set sequences in frame
    sbjct_seq = sbjct_seq[i_start:]
    qry_seq = qry_seq[i_start:]

    # Find codon number of the first codon in the sequence, start at 0
    codon_no = int((sbjct_start-1) / 3) # 1,2,3 start on 0

    # s_shift and q_shift keep the two sequences in frame when gaps appear
    q_shift = 0
    s_shift = 0
    # NOTE(review): mut_no is assigned but never used in this function.
    mut_no = 0

    # Find inserts and deletions in sequence
    indel_no = 0
    indels = get_indels(sbjct_seq, qry_seq, sbjct_start)

    # Go through sequence and save mutations when found
    for index in range(0, len(sbjct_seq), 3):
        # Count codon number
        codon_no += 1

        # Shift index according to gaps
        s_i = index + s_shift
        q_i = index + q_shift

        # Get codons
        sbjct_codon = sbjct_seq[s_i:s_i+3]
        qry_codon = qry_seq[q_i:q_i+3]

        # Stop when fewer than two full codons of real sequence remain.
        # NOTE(review): ``qry_codon[q_i:]`` is suspicious -- qry_codon is
        # at most 3 characters long, so for q_i >= 3 this slice is always
        # empty; ``qry_seq[q_i:]`` looks intended.  Verify before
        # changing, as it alters where the loop terminates.
        if len(sbjct_seq[s_i:].replace("-","")) + len(qry_codon[q_i:].replace("-","")) < 6:
            break

        # Check for mutations
        if sbjct_codon.upper() != qry_codon.upper():
            # Check for codon insertions and deletions and frameshift mutations
            if "-" in sbjct_codon or "-" in qry_codon:
                # Get indel info
                try:
                    indel_data = indels[indel_no]
                except IndexError:
                    # NOTE(review): debug-only branch -- ``gene`` is not
                    # defined in this scope and ``indel_data`` is unbound
                    # here, so this path raises NameError after the
                    # prints instead of reporting cleanly.
                    print(sbjct_codon, qry_codon)
                    print(indels)
                    print(gene, indel_data, indel_no)

                mut = indel_data[0]
                codon_no_indel = indel_data[1]
                seq_pos = indel_data[2] + sbjct_start - 1
                indel = indel_data[3]
                indel_no +=1

                # Get the affected sequence in frame for both for sbjct and qry
                if mut == "ins":
                    sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
                    qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
                else:
                    qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
                    sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))

                mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)

                # Set index to the correct reading frame after the indel gap
                shift_diff_before = abs(s_shift - q_shift)
                s_shift += len(sbjct_rf_indel) - 3
                q_shift += len(qry_rf_indel) - 3
                shift_diff = abs(s_shift - q_shift)

                # If the frame was already shifted and the new shift is a
                # multiple of 3, the reading frame has been restored:
                # re-extract the shorter side so both sides cover the same
                # region, then re-name the mutation.
                if shift_diff_before != 0 and shift_diff %3 == 0:
                    if s_shift > q_shift:
                        nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
                        pre_qry_indel = qry_rf_indel
                        qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
                        q_shift += len(qry_rf_indel) - len(pre_qry_indel)
                    elif q_shift > s_shift:
                        nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
                        pre_sbjct_indel = sbjct_rf_indel
                        sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
                        s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)

                    mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
                    if "Frameshift" in mut_name:
                        mut_name = mut_name.split("-")[0] + "- Frame restored"

                mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]

                # Check if the next mutation in the indels list is in the current codon
                # Find the number of individual gaps in the evaluated sequence
                no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
                if no_of_indels > 1:
                    # Emit one entry per additional gap, reusing the name
                    # and sequences computed for the first one.
                    for j in range(indel_no, indel_no + no_of_indels - 1):
                        try:
                            indel_data = indels[j]
                        except IndexError:
                            sys.exit("indel_data list is out of range, bug!")
                        mut = indel_data[0]
                        codon_no_indel = indel_data[1]
                        seq_pos = indel_data[2] + sbjct_start - 1
                        indel = indel_data[3]
                        indel_no +=1
                        mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]

                # Set codon number, and save nucleotides from out of frame mutations
                if mut == "del":
                    codon_no += int((len(sbjct_rf_indel) - 3)/3)
                # If evaluated insert is only gaps codon_no should not increment
                elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
                    codon_no -= 1

            # Check of point mutations
            else:
                mut = "sub"
                aa_ref = aa(sbjct_codon)
                aa_alt = aa(qry_codon)
                if aa_ref != aa_alt:
                    # End search for mutation if a premature stop codon is found
                    mut_name = "p." + aa_ref + str(codon_no) + aa_alt
                    mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]

            # If a premature stop codon occurs, report it and stop the loop
            try:
                if mis_matches[-1][-1] == "*":
                    mut_name += " - Premature stop codon"
                    mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
                    break
            except IndexError:
                pass

    # Sort mutations on position
    mis_matches = sorted(mis_matches, key = lambda x:x[1])

    return mis_matches
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids
The function returns a list of tuples containing all the information needed
about the mutation in order to look it up in the database dict of known
mutations and to write the output files for the user. |
def _group_groups(perm_list):
    """Group permissions by group.

    Input is a list of 3-tuples of the form::

        (<group_id>, <group_name>, <single_permission>)

    The permissions are regrouped so that each group appears exactly
    once::

        (<group_id>, <group_name>, [<first_permission>, <second_permission>,...])

    :param list perm_list: list of tuples of length 3
    :return: list of tuples with grouped permissions
    :rtype: list
    """
    grouped = []
    # Sorting by group id makes equal (id, name) pairs adjacent, so a
    # single pass can collect each group's permissions in order.
    for gid, gname, perm in sorted(perm_list, key=lambda tup: tup[0]):
        if grouped and grouped[-1][0] == gid and grouped[-1][1] == gname:
            grouped[-1][2].append(perm)
        else:
            grouped.append((gid, gname, [perm]))
    return grouped
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list |
def CreateVertices(self, points):
    """
    Build a directed graph (``digraph``) with one node per point.

    Each point is a 3-tuple ``(z, x, Q)`` and is added verbatim as a
    node; no edges are created here.

    NOTE(review): the previous docstring claimed a dictionary keyed by
    2-tuples was returned, which did not match the code -- a ``digraph``
    is returned.
    """
    gr = digraph()
    for z, x, Q in points:
        node = (z, x, Q)
        gr.add_nodes([node])
    return gr
represnting a point. |
def intermediate_fluents(self) -> Dict[str, PVariable]:
    '''Returns the interm-fluent pvariables, mapping each pvariable's
    string representation to the pvariable object.'''
    fluents = {}
    for pvar in self.pvariables:
        if pvar.is_intermediate_fluent():
            fluents[str(pvar)] = pvar
    return fluents
def query_string(context, key, value):
    """
    For adding/replacing a key=value pair in the GET string for a URL.

    eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
    then this returns "p=3&order=taken"

    And, if we're viewing ?p=3&order=uploaded and we do the same thing,
    we get the same result (ie, the existing "order=uploaded" is
    replaced).

    Expects the request object in context to do the above; otherwise it
    will just return a query string with the supplied key=value pair.
    """
    try:
        params = context['request'].GET.copy()
    except KeyError:
        # No request in the context: start from an empty query dict.
        params = QueryDict('').copy()
    params[key] = value
    return params.urlencode()
eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
then this returns "p=3&order=taken"
And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get
the same result (ie, the existing "order=uploaded" is replaced).
Expects the request object in context to do the above; otherwise it will
just return a query string with the supplied key=value pair. |
def progress(params, rep):
    """Return the progress (0-100) of one experiment repetition.

    Progress is measured as the number of lines written to the
    repetition's log file relative to the configured number of
    iterations.

    :param params: experiment parameters dict; must contain 'path',
                   'name' and 'iterations'.
    :param rep: repetition index; its log file is '<rep>.log'.
    :return: integer percentage; 0 if the log file does not exist yet.
    """
    fullpath = os.path.join(params['path'], params['name'])
    logname = os.path.join(fullpath, '%i.log' % rep)
    if not os.path.exists(logname):
        # The repetition has not started yet: no log file written.
        return 0
    # Context manager guarantees the file is closed even if reading fails
    # (the original leaked the handle on error).
    with open(logname, 'r') as logfile:
        num_lines = sum(1 for _ in logfile)
    return int(100 * num_lines / params['iterations'])
def LoadFromStorage(cls, path=None):
    """Creates an AdWordsClient with information stored in a yaml file.

    Args:
      [optional]
      path: The path string to the file containing cached AdWords data.

    Returns:
      An AdWordsClient initialized with the values cached in the file.

    Raises:
      A GoogleAdsValueError if the given yaml file does not contain the
      information necessary to instantiate a client object - either a
      required key was missing or an OAuth2 key was missing.
    """
    if path is None:
        # Default to ~/googleads.yaml when no explicit path is given.
        path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
    cached_values = googleads.common.LoadFromStorage(
        path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
        cls._OPTIONAL_INIT_VALUES)
    return cls(**cached_values)
Args:
[optional]
path: The path string to the file containing cached AdWords data.
Returns:
An AdWordsClient initialized with the values cached in the file.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required key was missing or an OAuth2 key was missing. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.