content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def is_batch_norm(layer):
    """Check whether `layer` is a batch normalisation layer.

    True when the layer's class name contains the substring 'BatchNorm'
    (covers BatchNorm1d/2d/3d and similar variants).
    """
    return 'BatchNorm' in type(layer).__name__
import torch
def predictive_entropy(y_input, y_target):
    """
    Computes the entropy of predictions by the model.

    :param y_input: Tensor [N, samples, class] of log-probabilities
        (model output is assumed to be log_softmax).
    :param y_target: Tensor [N]. Not used here.
    :return: scalar numpy value, mean entropy over all examples.
    """
    y_input = torch.exp(y_input)  # model output is log_softmax so we exponentiate
    y_posterior = torch.mean(y_input, dim=1)  # Average over all the samples to marginalize over epsilon
    # y_posterior is now [N, class]: the marginal predictive distribution.
    # NOTE(review): epsilon is added *after* averaging, so each row no longer
    # sums exactly to 1 -- presumably acceptable for numerical stability.
    epsilon = 1e-25
    y_posterior += epsilon  # We add a small constant to each term to avoid infinities
    # NOTE(review): torch.mean over dim=1 divides the Shannon entropy sum by
    # the class count; standard entropy would use torch.sum -- confirm the
    # scaling is intended.
    entropy = - torch.mean(y_posterior * torch.log(y_posterior), dim=1)  # [N] entropy on each example
    return torch.mean(entropy).cpu().numpy()
def exec_cmd(cmd_args, *args, **kw):
    """
    Execute a shell call using Subprocess. All additional `*args` and
    `**kwargs` are passed directly to subprocess.Popen. See `Subprocess
    <http://docs.python.org/library/subprocess.html>`_ for more information
    on the features of `Popen()`.

    :param cmd_args: List of command line arguments.
    :type cmd_args: list.
    :param args: Additional arguments are passed to Popen().
    :param kwargs: Additional keyword arguments are passed to Popen().
    :returns: The (stdout, stderror, return_code) of the command.
    :rtype: tuple

    Usage:

    .. code-block:: python

        from cement.utils import shell
        stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])
    """
    # Capture both streams by default, unless the caller overrides them.
    kw.setdefault('stdout', PIPE)
    kw.setdefault('stderr', PIPE)
    proc = Popen(cmd_args, *args, **kw)
    # communicate() reads all output and waits for the process to terminate,
    # so an extra proc.wait() is unnecessary.
    stdout, stderr = proc.communicate()
    return (stdout, stderr, proc.returncode)
from datetime import datetime
def roundTime(dt=None, roundTo=1):
"""Round a datetime object to any time period (in seconds)
dt : datetime.datetime object, default now.
roundTo : Closest number of seconds to round to, default 1 second.
Author: Thierry Husson 2012 - Use it as you want but don't blame me.
http://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object-python/10854034#10854034
"""
if dt == None : dt = datetime.now()
seconds = total_seconds(dt - dt.min)
# // is a floor division, not a comment on following line:
rounding = (seconds+roundTo/2) // roundTo * roundTo
return dt + timedelta(0,rounding-seconds,-dt.microsecond) | c17cbf9092cc2a88cb486afd1a7ff0ad984987bd | 25,703 |
from typing import OrderedDict
import requests
def get_imagery_layers(url):
    """
    Get the list of available image layers that can be used as background
    or foreground based on the URL to a WTML (WorldWide Telescope image
    collection file).

    Parameters
    ----------
    url : `str`
        The URL of the image collection.

    Returns
    -------
    OrderedDict mapping survey name to {'thumbnail': url-or-None}.
    """
    available_layers = OrderedDict()
    # Get the XML describing the available surveys.
    response = requests.get(url)
    # Fail loudly on HTTP errors; `assert response.ok` is stripped when
    # Python runs with -O, so raise explicitly instead.
    response.raise_for_status()
    tree = ElementTree().parse(BytesIO(response.content))
    for survey in tree.iter('ImageSet'):
        name = survey.attrib['Name']
        thumbnail_url = survey.find('ThumbnailUrl').text
        # Normalize empty strings to None for a consistent "no thumbnail".
        if not thumbnail_url:
            thumbnail_url = None
        available_layers[name] = {'thumbnail': thumbnail_url}
    return available_layers
def _typecheck(op1, op2):
    """Check the type of parameters used and return correct enum type.

    Returns the ParamTypes member matching (type(op1), type(op2)), or None
    when neither operand combination applies.
    """
    dispatch = (
        ((CipherText, CipherText), ParamTypes.CTCT),
        ((PlainText, PlainText), ParamTypes.PTPT),
        ((CipherText, PlainText), ParamTypes.CTPT),
        ((PlainText, CipherText), ParamTypes.PTCT),
    )
    for (first_type, second_type), param_type in dispatch:
        if isinstance(op1, first_type) and isinstance(op2, second_type):
            return param_type
    return None
def vm_update_cb(result, task_id, vm_uuid=None, new_node_uuid=None):
    """
    A callback function for api.vm.base.views.vm_manage.

    Parses the json returned by the compute node, stores it as the VM's
    active json, optionally re-associates the VM with a new node, and emits
    change signals. Raises TaskException on parse failure, on DB/node
    definition mismatch, or on a nonzero task return code.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']
    # Proceed only on success; a forced update skips the message check.
    if result['returncode'] == 0 and (force or msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)
        try:  # save json from smartos
            json_active = vm.json.load(json)
        except Exception as e:
            logger.exception(e)
            logger.error('Could not parse json output from PUT vm_manage(%s). Error: %s', vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')
        vm_delete_snapshots_of_removed_disks(vm)  # Do this before updating json and json_active
        vm.json = json_active
        update_fields = ['enc_json', 'enc_json_active', 'changed']
        # Attributes expected to differ between node and DB; ignored when
        # verifying that the definitions match before a node change.
        ignored_changed_vm_attrs = (
            'set_customer_metadata',
            'remove_customer_metadata',
            'create_timestamp',
            'boot_timestamp',
            'autoboot',
            'vnc_port',
            'update_disks',
        )
        if new_node_uuid:
            # A node change requires the node's definition to match the DB's.
            update_dict = vm.json_update()
            for i in ignored_changed_vm_attrs:
                update_dict.pop(i, None)
            if update_dict:
                raise TaskException(result, 'VM definition on compute node differs from definition in DB in '
                                            'following attributes: %s' % ','.join(update_dict.keys()))
            update_fields.append('node_id')
        old_json_active = vm.json_active
        vm.json_active = json_active
        if new_node_uuid:
            node = Node.objects.get(uuid=new_node_uuid)
            vm.set_node(node)
        # Persist VM, usage and signal emission atomically.
        with transaction.atomic():
            vm.save(update_node_resources=True, update_storage_resources=True, update_fields=update_fields)
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(task_id, vm=vm, old_json_active=old_json_active)  # Signal!
        if new_node_uuid:
            vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
            result['message'] = 'Node association successfully changed on VM %s' % vm.hostname
        # If the definition still differs after saving, trigger another update.
        if vm.json_changed():
            vm_update(vm)
    else:
        logger.error('Found nonzero returncode in result from PUT vm_manage(%s). Error: %s', vm_uuid, msg)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
from typing import Union
def _class_effective_mesh_size(
    geo_graph: geograph.GeoGraph, class_value: Union[int, str]
) -> Metric:
    """
    Return effective mesh size of given class.
    Definition taken from:
    https://pylandstats.readthedocs.io/en/latest/landscape.html
    """
    # Areas of all patches belonging to the requested class.
    class_mask = geo_graph.df["class_label"] == class_value
    patch_areas = geo_graph.df["geometry"][class_mask].area
    landscape_area = geo_graph.get_metric("total_area").value
    # MESH = sum(a_i^2) / A.
    mesh_value = np.sum(patch_areas ** 2) / landscape_area
    return Metric(
        value=mesh_value,
        name=f"effective_mesh_size_class={class_value}",
        description=(
            "A <= MESH <= A ; MESH approaches its minimum when there is a single"
            " corresponding patch of one pixel, and approaches its maximum when the "
            "landscape consists of a single patch."
        ),
        variant="conventional",
        unit="CRS.unit**2",
    )
def noam_schedule(step, warmup_step=4000):
    """Original Transformer learning-rate schedule: linear warmup followed
    by inverse-square-root decay after `warmup_step`."""
    if step > warmup_step:
        return (warmup_step ** 0.5) * (step ** -0.5)
    return step / warmup_step
import os
def getoldiddfile(versionid):
    """find the IDD file of the E+ installation
    E+ version 7 and earlier have the idd in
    /EnergyPlus-7-2-0/bin/Energy+.idd """
    parts = versionid.split('.')
    # Pad the version to three components (major-minor-patch).
    while len(parts) < 3:
        parts.append('0')
    ver_str = '-'.join(parts)
    eplus_exe, _ = eppy.runner.run_functions.install_paths(ver_str)
    eplusfolder = os.path.dirname(eplus_exe)
    return '{}/bin/Energy+.idd'.format(eplusfolder)
def word_list_to_long(val_list, big_endian=True):
    """Word list (16 bits int) to long list (32 bits int).

    By default uses big endian order; set big_endian to False for little
    endian.

    :param val_list: list of 16 bits int value
    :type val_list: list
    :param big_endian: True for big endian/False for little (optional)
    :type big_endian: bool
    :returns: list of 32 bits int value
    :rtype: list
    """
    # Pair up consecutive words; a trailing odd word (if any) is ignored.
    word_pairs = [
        (val_list[2 * i], val_list[2 * i + 1])
        for i in range(len(val_list) // 2)
    ]
    if big_endian:
        return [(first << 16) + second for first, second in word_pairs]
    return [(second << 16) + first for first, second in word_pairs]
from typing import List
def _show_problems_info(id_tournament: int) -> List:
    """
    Return information about the problems of the tournament with the given
    id (`id_tournament`) from the Codeforces API.
    """
    # BUG FIX: the contest id was hard-coded to 1477 even though the URL was
    # an f-string -- interpolate the actual parameter instead.
    url = (
        'https://codeforces.com/api/contest.standings'
        f'?contestId={id_tournament}&from=1&count=5&showUnofficial=true'
    )
    return loop.run_until_complete(get_request(url))['problems']
from typing import Dict
from typing import Any
def make_shell_context() -> Dict[str, Any]:
    """Build the namespace of objects exposed to the interactive shell."""
    exposed = [
        ("db", db),
        ("api", api),
        ("Playlist", Playlist),
        ("User", User),
        ("BlacklistToken", BlacklistToken),
    ]
    return dict(exposed)
async def find_user_by_cards(app, cards, fields=["username"]):
    """Find a user by a list of cards assigned to them.

    Parameters
    ----------
    app : aiohttp.web.Application
        The aiohttp application instance
    cards : list
        The list of cards to search for
    fields : list, default=["username"]
        The fields to be returned in the user document

    Returns
    -------
    user : dict
        The user document
    """
    # NOTE(review): mutable default argument -- safe only while no caller
    # mutates `fields`; consider `fields=None` with the default set inside.
    if not isinstance(cards, list):
        cards = [cards]
    # Build a Mongo-style projection limiting the returned fields;
    # suppress _id unless it was explicitly requested.
    projection = {}
    for field in fields:
        projection[field] = 1
    if "_id" not in fields:
        projection["_id"] = 0
    # NOTE(review): {"cards": cards} matches the *exact* list (order and
    # contents), not "user has any of these cards" -- confirm that
    # {"cards": {"$in": cards}} was not intended.
    return await app["db"].users.find_one({"cards": cards}, projection)
def _calc_y_from_dataframe(trafo_df, baseR):
    """
    Calculate the subsceptance y from the transformer dataframe.

    INPUT:
        **trafo** (Dataframe) - The dataframe in net.trafo
        which contains transformer calculation values.

    RETURN:
        **subsceptance** (1d array, np.complex128) - The subsceptance in pu in
        the form (-b_img, -b_real)
    """
    ### Calculate subsceptance ###
    # Squared LV-side rated voltage, used for per-unit conversion below.
    unl_squared = trafo_df["vn_lv_kv"].values**2
    # Real part derived from the iron losses (pfe_kw is in kW).
    b_real = trafo_df["pfe_kw"].values / (1000. * unl_squared) * baseR
    # Imaginary part: open-loop magnetising component minus the iron-loss
    # component (both converted to MVA-scale before squaring).
    b_img = (trafo_df["i0_percent"].values / 100. * trafo_df["sn_kva"].values / 1000.)**2 \
        - (trafo_df["pfe_kw"].values / 1000.)**2
    # Numerical noise can make the difference slightly negative; clamp to 0
    # so the square root below stays real.
    b_img[b_img < 0] = 0
    b_img = np.sqrt(b_img) * baseR / unl_squared
    return -b_real * 1j - b_img
def mutual_coherence(A, B):
    """Mutual coherence between two dictionaries A and B.

    Delegates to `mutual_coherence_with_index` and discards the index.
    """
    coherence, _ = mutual_coherence_with_index(A, B)
    return coherence
def pdf_page_enumeration(pdf):
    """Generate a list of page labels, using /PageLabels (if it exists).

    :param pdf: an opened PyPDF2-style reader object.
    :returns: a list of page-label strings, or a plain range of page numbers
        when the document defines no /PageLabels.
    :raises CommandError: if /PageLabels is malformed or uses an unknown
        numbering style.
    """
    # NOTE: narrowed from a bare `except:`; PyPDF2 dictionaries raise
    # KeyError for missing entries.
    try:
        pdf.trailer["/Root"]["/PageLabels"]
    except KeyError:
        # ("No /Root/PageLabels object"), so infer the list.
        return range(1, pdf.getNumPages() + 1)
    # Select the item that is most likely to contain the information you
    # desire; e.g. {'/Nums': [0, IndirectObject(42, 0)]} -- here, we only
    # have "/Nums".
    try:
        pagelabels_nums = pdf.trailer["/Root"]["/PageLabels"]["/Nums"]
    except KeyError:
        raise CommandError("Malformed PDF, /Root/PageLabels but no .../Nums object")
    # At this point we have either the object or the list. Make it a list.
    if isinstance(pagelabels_nums, list):
        pagelabels_nums_list = pagelabels_nums
    else:
        pagelabels_nums_list = list(pagelabels_nums)
    labels = []
    style = '/D'  # default: decimal numbering
    prefix = ''
    next_pageno = 1
    for i in range(pdf.getNumPages()):
        # /Nums is a flat [page-index, entry, page-index, entry, ...] list;
        # switch numbering scheme when the current page reaches the next index.
        if len(pagelabels_nums_list) > 0 and i >= pagelabels_nums_list[0]:
            pagelabels_nums_list.pop(0)  # discard index
            pnle = pagelabels_nums_list.pop(0)
            style = pnle.get('/S', '/D')
            prefix = pnle.get('/P', '')
            next_pageno = pnle.get('/St', 1)
        if style == '/D':
            pageno_str = str(next_pageno)
        elif style == '/A':
            pageno_str = int_to_page_alpha(next_pageno, 'A')
        elif style == '/a':
            pageno_str = int_to_page_alpha(next_pageno, 'a')
        elif style == '/R':
            pageno_str = int_to_roman(next_pageno)
        elif style == '/r':
            pageno_str = int_to_roman(next_pageno).lower()
        else:
            # Typo fixes: "Malformded" -> "Malformed", "unkown" -> "unknown".
            raise CommandError("Malformed PDF: unknown page numbering style " + style)
        labels.append(prefix + pageno_str)
        next_pageno += 1
    return labels
import torch
def transform_points_torch(points, homography):
    """Transforms input points according to homography.

    Args:
      points: [..., H, W, 3]; pixel (u,v,1) coordinates.
      homography: [..., 3, 3]; desired matrix transformation
    Returns:
      output_points: [..., H, W, 3]; transformed (u,v,w) coordinates.
    """
    # The points vary across image height and width, so flatten those two
    # dimensions before multiplying by the per-image homographies, then
    # restore the original shape afterwards.
    original_shape = points.shape
    flat_shape = list(homography.shape)
    flat_shape[-2] = -1
    flat_points = torch.reshape(points, flat_shape)
    warped = torch.matmul(flat_points, transpose_torch(homography))
    return torch.reshape(warped, original_shape)
def _sane_fekete_points(directions, n_dim):
    """
    get fekete points for DirectionalSimulator object.
    use get_directions function for other use cases.

    `directions` may be None (use n_dim * 80 directions), an int (number of
    directions to generate), or an array of shape (n_dir, n_dim) returned
    unchanged after validation.
    """
    if directions is None:
        return fekete_points(n_dim, n_dim * 80, max_iters=100, tolerance=1e-12)
    if isinstance(directions, int):
        return fekete_points(n_dim, directions, max_iters=100, tolerance=1e-12)
    try:
        _, n_dim_dir = directions.shape
    except AttributeError:
        raise TypeError(
            "Only an integer or a numpy array is accepted as directions.")
    if n_dim != n_dim_dir:
        raise ValueError(
            "Number of dimensions of the directions does not "
            "match the number of marginal distributions")
    return directions
def backoffPolicy(initialDelay=1.0, maxDelay=60.0, factor=1.5,
                  jitter=_goodEnoughRandom):
    """
    A timeout policy for L{ClientService} computing an exponential backoff
    interval with configurable parameters.

    @since: 16.1.0

    @param initialDelay: Delay for the first reconnection attempt (default
        1.0s).
    @type initialDelay: L{float}

    @param maxDelay: Maximum number of seconds between connection attempts
        (default 60 seconds, or one minute). This cap is applied before
        jitter, so the actual maximum possible delay is this value plus the
        maximum possible result of C{jitter()}.
    @type maxDelay: L{float}

    @param factor: A multiplicative factor by which the delay grows on each
        failed reattempt. Default: 1.5.
    @type factor: L{float}

    @param jitter: A 0-argument callable that introduces noise into the
        delay. By default, C{random.random}, i.e. a pseudorandom
        floating-point value between zero and one.
    @type jitter: 0-argument callable returning L{float}

    @return: a 1-argument callable that, given an attempt count, returns a
        floating point number; the number of seconds to delay.
    @rtype: see L{ClientService.__init__}'s C{retryPolicy} argument.
    """
    def policy(attempt):
        # Cap the exponent at 100 to keep the power computation bounded;
        # float overflow beyond that simply saturates at maxDelay.
        exponent = min(100, attempt)
        try:
            delay = initialDelay * (factor ** exponent)
        except OverflowError:
            return maxDelay + jitter()
        return min(delay, maxDelay) + jitter()
    return policy
def block_group(inputs,
                filters,
                strides,
                block_fn,
                block_repeats,
                conv2d_op=None,
                activation=tf.nn.swish,
                batch_norm_activation=nn_ops.BatchNormActivation(),
                dropblock=nn_ops.Dropblock(),
                drop_connect_rate=None,
                data_format='channels_last',
                name=None,
                is_training=False):
    """Creates one group of blocks for NAS-FPN.

    Supports two block types: 'conv' builds a single 3x3 conv followed by
    batch norm (no activation) and dropblock; 'bottleneck' delegates to
    resnet.block_group with ResNet bottleneck blocks. Any other `block_fn`
    raises ValueError.
    """
    if block_fn == 'conv':
        inputs = conv2d_op(
            inputs,
            filters=filters,
            kernel_size=(3, 3),
            padding='same',
            data_format=data_format,
            name='conv')
        # relu=False: apply batch norm only, no activation after the conv.
        inputs = batch_norm_activation(
            inputs, is_training=is_training, relu=False, name='bn')
        inputs = dropblock(inputs, is_training=is_training)
        return inputs
    if block_fn != 'bottleneck':
        raise ValueError('Block function {} not implemented.'.format(block_fn))
    # NOTE(review): unpacking assumes a 4-D channels-last tensor despite the
    # `data_format` argument -- confirm for channels_first inputs.
    _, _, _, num_filters = inputs.get_shape().as_list()
    block_fn = nn_blocks.bottleneck_block
    # Project the shortcut only when channel count or spatial size changes
    # (bottleneck output has filters * 4 channels).
    use_projection = not (num_filters == (filters * 4) and strides == 1)
    return resnet.block_group(
        inputs=inputs,
        filters=filters,
        strides=strides,
        use_projection=use_projection,
        block_fn=block_fn,
        block_repeats=block_repeats,
        activation=activation,
        batch_norm_activation=batch_norm_activation,
        dropblock=dropblock,
        drop_connect_rate=drop_connect_rate,
        data_format=data_format,
        name=name,
        is_training=is_training)
def GeneratePublicKeyDataFromFile(path):
    """Read a public key file and return its contents as UTF-8 bytes.

    Args:
        path: (bytes) the public key file path given by the command.
    Raises:
        InvalidArgumentException: if the public key file path provided does
            not exist or is too large.
    Returns:
        A public key encoded using the UTF-8 charset.
    """
    try:
        key_text = arg_parsers.FileContents()(path)
    except arg_parsers.ArgumentTypeError as e:
        raise gcloud_exceptions.InvalidArgumentException(
            'public_key_file',
            '{}. Please double check your input and try again.'.format(e))
    return key_text.strip().encode('utf-8')
import argparse
def parse_overrides(overrides, pre):
    """Find override parameters in the cli args.

    Note:
        Repeating the same cli flag aggregates the values into a list, e.g.
        `--x:a 1 --x:a 2 --x:a 3` gives back `{'a': [1, 2, 3]}`.

    :param overrides: The cli flags and values.
    :param pre: only look at keys that start with `--{pre}:`
    :returns: The key value pairs from the command line args.
    """
    flag_prefix = f'--{pre}:'
    parser = argparse.ArgumentParser()
    for flag in {arg for arg in overrides if flag_prefix in arg}:
        # 'append' collects repeated flags into a list, letting a yaml list
        # be overridden by repeating the key.
        parser.add_argument(flag, action='append', type=_infer_numeric_or_str)
    parsed, _ = parser.parse_known_args(overrides)
    result = {}
    for key, values in vars(parsed).items():
        short_key = key.split(":")[1]
        result[short_key] = values[0] if len(values) == 1 else values
    return result
def make_keyword_html(keywords):
    """This function makes a section of HTML code for a list of keywords.

    Args:
        keywords: A list of strings where each string is a keyword.
    Returns:
        A string containing HTML code for displaying keywords, for example:
        '<strong>Ausgangswörter:</strong> Nature, Plants, Fauna'
    """
    # str.join handles the empty and single-keyword cases without the
    # IndexError the manual keywords[-1] access raised on an empty list.
    return '<strong>Ausgangswörter:</strong> ' + ', '.join(keywords)
def is_match(set, i):
    """Check whether the three cards all share the same characteristic.

    Args:
        set (2D-list): a set of three cards
        i (int): index of the characteristic to compare
    Returns:
        boolean: True when all three cards agree on characteristic i
    """
    first, second, third = set[0][i], set[1][i], set[2][i]
    return first == second == third
def notGroup(states, *stateIndexPairs):
    """Like group, but will add a DEFAULT transition to a new end state,
    causing anything in the group to not match by going to a dead state.
    XXX I think this is right...
    """
    # Build the ordinary group first; its end state becomes the dead state.
    start, dead = group(states, *stateIndexPairs)
    # Append a fresh, empty state that becomes the real accepting state.
    finish = len(states)
    states.append([])
    # Anything NOT matched inside the group falls through to `finish`.
    states[start].append((DEFAULT, finish))
    return start, finish
import os
def copy_file_to(local_file_path_or_handle,
                 cloud_storage_file_path,
                 metadata=None):
    """Copy local file to a cloud storage path.

    `local_file_path_or_handle` may be a path string or an open file handle
    (handles are passed through to the provider untouched). Returns False
    and logs an error when a path is given but does not exist; otherwise
    returns whatever the storage provider's copy_file_to returns.
    """
    # `basestring` implies Python 2; only path strings get the existence check.
    if (isinstance(local_file_path_or_handle, basestring) and
            not os.path.exists(local_file_path_or_handle)):
        logs.log_error('Local file %s not found.' % local_file_path_or_handle)
        return False
    return _provider().copy_file_to(
        local_file_path_or_handle, cloud_storage_file_path, metadata=metadata)
import numpy as np
def get_image_array(conn, image_id):
    """
    Retrieve an image from an OMERO server as a numpy array.

    :param conn: an open OMERO gateway connection providing getObject().
    :param image_id: id of the Image object to fetch.
    :return: numpy array of shape (t, c, y, x, z) holding all planes.
    """
    image = conn.getObject("Image", image_id)
    # construct numpy array (t, c, x, y, z)
    size_x = image.getSizeX()
    size_y = image.getSizeY()
    size_z = image.getSizeZ()
    size_c = image.getSizeC()
    size_t = image.getSizeT()
    # X and Y fields have to be aligned this way since during generation of the image from the numpy array the 2darray is expected to be (Y,X)
    # See Documentation here https://downloads.openmicroscopy.org/omero/5.5.1/api/python/omero/omero.gateway.html#omero.gateway._BlitzGateway
    hypercube = np.zeros((size_t, size_c, size_y, size_x, size_z))
    pixels = image.getPrimaryPixels()
    # Fetch each plane individually and copy it into the hypercube.
    for t in range(size_t):
        for c in range(size_c):
            for z in range(size_z):
                plane = pixels.getPlane(z, c, t)  # get a numpy array.
                hypercube[t, c, :, :, z] = plane
    return hypercube
def test_softsign():
    """Test `activations.softsign` against a reference implementation.

    Compares the backend-evaluated activation with x / (1 + |x|) computed
    in NumPy over a set of standard test values.
    """
    def softsign(x):
        # Reference implementation: x / (1 + |x|).
        return np.divide(x, np.ones_like(x) + np.absolute(x))
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softsign(x)])
    test_values = get_standard_values()
    result = f([test_values])[0]
    expected = softsign(test_values)
    assert_allclose(result, expected, rtol=1e-05)
def run(
        package_out_dir,
        package_tests_dir,
        work_dir,
        packages):
    """Deployes build *.cipd package locally and runs tests against them.

    Used to verify the packaged code works when installed as CIPD package, it is
    important for infra_python package that has non-trivial structure.

    Args:
      package_out_dir: where to search for built packages.
      package_tests_dir: where to search for tests/<name>.py test scripts.
      work_dir: where to install/update packages into.
      packages: names of *.cipd files in package_out_dir or [] for all.

    Returns:
      0 on success, 1 or error.
    """
    # Discover what to test.
    paths = []
    if not packages:
        # Enumerate all known tests in tests/*.py and filter them based on
        # availability of corresponding *.cipd package in package_out_dir. It will
        # skip any cross-compiled packages, since they have additional '+<platform>'
        # suffix in the package file name.
        for test in os.listdir(package_tests_dir):
            if not test.endswith('.py'):
                continue
            pkg_file = os.path.join(
                package_out_dir, os.path.splitext(test)[0] + '.cipd')
            if os.path.exists(pkg_file):
                paths.append(pkg_file)
    else:
        for name in packages:
            abs_path = os.path.join(package_out_dir, name)
            if not os.path.isfile(abs_path):
                raise TestException('No such package file: %s' % name)
            paths.append(abs_path)
    paths = sorted(paths)
    if not paths:
        print 'Nothing to test.'
        return 0
    cipd_client = find_cipd()
    if not cipd_client:
        return 1
    # Run all tests sequentially. Most of them are extra fast.
    # Use a throwaway temp dir when the caller did not supply one.
    nuke_temp = False
    if not work_dir:
        work_dir = tempfile.mkdtemp(suffix='cipd_test')
        nuke_temp = True
    work_dir = os.path.abspath(work_dir)
    try:
        fail = False
        for path in paths:
            name = os.path.splitext(os.path.basename(path))[0]
            test_script = os.path.join(package_tests_dir, '%s.py' % name)
            if not os.path.isfile(test_script):
                print 'Skipping tests for %s - no such file: %s' % (name, test_script)
                continue
            try:
                run_test(
                    cipd_client=cipd_client,
                    package=path,
                    work_dir=os.path.join(work_dir, name),
                    test_script=test_script)
                print ''
                print 'PASS'
            except TestException as exc:
                print >> sys.stderr, ''
                print >> sys.stderr, 'FAILED! ' * 10
                print >> sys.stderr, 'Tests for %s failed: %s' % (name, exc)
                fail = True
        return 1 if fail else 0
    finally:
        # Best-effort cleanup of the temp dir; failure to delete is non-fatal.
        if nuke_temp:
            try:
                shutil.rmtree(work_dir, ignore_errors=True)
            except OSError as exc:
                print >> sys.stderr, 'Failed to delete %s: %s' % (work_dir, exc)
def get_published_questions(quiz):
    """
    Return the QuerySet of published questions for the given quiz.
    """
    # get_questions_by_quiz already orders the questions by serial number.
    quiz_questions = get_questions_by_quiz(quiz)
    return quiz_questions.filter(published=True)
def moving_average(x, window):
    """Smooth `x` with a centred boxcar filter of length `window`.

    :param int window: odd windows preserve phase
    From http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb
    """
    kernel = np.full(window, 1.0 / window)
    return np.convolve(x, kernel, "same")
def validate_interval_avg_data(in_data):
    """Validate input to get_avg_since for correct fields.

    Args:
        in_data: dictionary received from POST request
    Returns:
        boolean: True when every key of in_data is an expected field
        (missing keys are not checked, matching the original contract).
    """
    expected_keys = {"patient_id", "heart_rate_average_since"}
    return set(in_data.keys()) <= expected_keys
def _get_cm_control_command(action='--daemon', cm_venv_name='CM', ex_cmd=None):
    """
    Compose a system level command used to control (i.e., start/stop) CloudMan.

    Accepted values to the ``action`` argument are: ``--daemon``,
    ``--stop-daemon`` or ``--reload``. When a virtualenv named
    ``cm_venv_name`` exists, the returned command activates it first. The
    extra command ``ex_cmd``, when provided, is inserted before the run step.
    Example return string: ``cd /mnt/cm; [ex_cmd]; sh run.sh --daemon``
    """
    if not _virtualenv_exists(cm_venv_name):
        return "cd {0}; {2}; sh run.sh {1}".format(CM_HOME, action, ex_cmd)
    run_in_venv = "workon {0}; cd {1}; {3}; sh run.sh {2}".format(
        cm_venv_name, CM_HOME, action, ex_cmd)
    return _with_venvburrito(run_in_venv)
def get_all_db_data() -> list:
    """Return every row stored in the ``news`` table."""
    cursor.execute('''SELECT * FROM news''')
    return cursor.fetchall()
def astra_projector(vol_interp, astra_vol_geom, astra_proj_geom, ndim, impl):
    """Create an ASTRA projector configuration dictionary.

    Parameters
    ----------
    vol_interp : {'nearest', 'linear'}
        Interpolation type of the volume discretization. This determines
        the projection model that is chosen.
    astra_vol_geom : dict
        ASTRA volume geometry.
    astra_proj_geom : dict
        ASTRA projection geometry.
    ndim : {2, 3}
        Number of dimensions of the projector.
    impl : {'cpu', 'cuda'}
        Implementation of the projector.

    Returns
    -------
    proj_id : int
        Handle for the created ASTRA internal projector object.
    """
    # Validate all inputs up front so failures surface before any ASTRA call.
    if vol_interp not in ('nearest', 'linear'):
        raise ValueError("`vol_interp` '{}' not understood"
                         ''.format(vol_interp))
    impl = str(impl).lower()
    if impl not in ('cpu', 'cuda'):
        raise ValueError("`impl` '{}' not understood"
                         ''.format(impl))
    if 'type' not in astra_proj_geom:
        raise ValueError('invalid projection geometry dict {}'
                         ''.format(astra_proj_geom))
    if ndim == 3 and impl == 'cpu':
        raise ValueError('3D projectors not supported on CPU')
    ndim = int(ndim)
    proj_type = astra_proj_geom['type']
    if proj_type not in ('parallel', 'fanflat', 'fanflat_vec',
                         'parallel3d', 'parallel3d_vec', 'cone', 'cone_vec'):
        raise ValueError('invalid geometry type {!r}'.format(proj_type))
    # Mapping from interpolation type and geometry to ASTRA projector type.
    # "I" means probably mathematically inconsistent. Some projectors are
    # not implemented, e.g. CPU 3d projectors in general.
    type_map_cpu = {'parallel': {'nearest': 'line',
                                 'linear': 'linear'},  # I
                    'fanflat': {'nearest': 'line_fanflat',
                                'linear': 'line_fanflat'},  # I
                    'parallel3d': {'nearest': 'linear3d',  # I
                                   'linear': 'linear3d'},  # I
                    'cone': {'nearest': 'linearcone',  # I
                             'linear': 'linearcone'}}  # I
    # The *_vec variants share the projector type of their base geometry.
    type_map_cpu['fanflat_vec'] = type_map_cpu['fanflat']
    type_map_cpu['parallel3d_vec'] = type_map_cpu['parallel3d']
    type_map_cpu['cone_vec'] = type_map_cpu['cone']
    # GPU algorithms not necessarily require a projector, but will in future
    # releases making the interface more coherent regarding CPU and GPU
    type_map_cuda = {'parallel': 'cuda',  # I
                     'parallel3d': 'cuda3d'}  # I
    type_map_cuda['fanflat'] = type_map_cuda['parallel']
    type_map_cuda['fanflat_vec'] = type_map_cuda['fanflat']
    type_map_cuda['cone'] = type_map_cuda['parallel3d']
    type_map_cuda['parallel3d_vec'] = type_map_cuda['parallel3d']
    type_map_cuda['cone_vec'] = type_map_cuda['cone']
    # create config dict
    proj_cfg = {}
    if impl == 'cpu':
        proj_cfg['type'] = type_map_cpu[proj_type][vol_interp]
    else:  # impl == 'cuda'
        proj_cfg['type'] = type_map_cuda[proj_type]
    proj_cfg['VolumeGeometry'] = astra_vol_geom
    proj_cfg['ProjectionGeometry'] = astra_proj_geom
    proj_cfg['options'] = {}
    # Add the hacky 1/r^2 weighting exposed in intermediate versions of
    # ASTRA
    if (proj_type in ('cone', 'cone_vec') and
            astra_supports('cone3d_hacky_density_weighting')):
        proj_cfg['options']['DensityWeighting'] = True
    if ndim == 2:
        return astra.projector.create(proj_cfg)
    else:
        return astra.projector3d.create(proj_cfg)
def __hit(secret_number, choice):
    """Return True when the player's choice equals the secret number."""
    return choice == secret_number
def minimal_product_data(setup_data):
    """Valid product data containing only the required fields."""
    product = dict(
        name='Bar',
        rating=.5,
        brand_id=1,
        categories_ids=[1],
        items_in_stock=111,
    )
    return product
def flatten_column_array(df, columns, separator="|"):
    """Flatten columns of lists of {'name': ...} records into strings.

    Each cell, a list of dicts, is replaced by the 'name' values joined by
    `separator` (list column -> string column). Mutates `df` in place and
    returns it.
    """
    def join_names(cell):
        return separator.join(str(record["name"]) for record in cell)
    df[columns] = df[columns].applymap(join_names)
    return df
from typing import List
def part2(lines: List[List[int]]):
    """Solve part 2: tile the input map 5x5, each tile's values incremented
    by its tile distance and wrapped back into 1..9, then run `perform_seek`
    on the enlarged grid (presumably a lowest-cost path search -- TODO
    confirm against Grid/perform_seek).
    """
    grid = Grid.from_text(lines)
    lim_x = grid.width()
    lim_y = grid.height()
    # Fill the remaining 24 tiles of the 5x5 super-grid.
    for by in range(5):
        for bx in range(5):
            if bx == by == 0:
                continue  # top-left tile already holds the original data
            for dy in range(lim_y):
                for dx in range(lim_x):
                    # Wrap into 1..9: values above 9 restart at 1.
                    grid[bx * lim_x + dx, by * lim_y + dy] = ((lines[dy][dx] + bx + by - 1) % 9) + 1
    best = perform_seek(grid)
    return best
    # 2914
def translate_point(point, y_offset=0, x_offset=0):
    """Translate points.

    This method is mainly used together with image transforms, such as padding
    and cropping, which translates the top left point of the image
    to the coordinate :math:`(y, x) = (y_{offset}, x_{offset})`.

    Args:
        point (~numpy.ndarray or list of arrays): See the table below.
        y_offset (int or float): The offset along y axis.
        x_offset (int or float): The offset along x axis.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`point`, ":math:`(R, K, 2)` or :math:`[(K, 2)]`", \
        :obj:`float32`, ":math:`(y, x)`"

    Returns:
        ~numpy.ndarray:
        Points modified translation of an image.
    """
    # Inputs are never mutated: each array is copied before shifting.
    if isinstance(point, np.ndarray):
        shifted = point.copy()
        shifted[:, :, 0] += y_offset
        shifted[:, :, 1] += x_offset
        return shifted
    shifted_list = []
    for pts in point:
        moved = pts.copy()
        moved[:, 0] += y_offset
        moved[:, 1] += x_offset
        shifted_list.append(moved)
    return shifted_list
def _get_tau_var(tau, tau_curriculum_steps):
    """Variable which increases linearly from 0 to tau over so many steps.

    Returns a non-trainable TF variable. When `tau_curriculum_steps` > 0 the
    returned tensor carries an assign op that bumps the value by
    tau / tau_curriculum_steps each time it is evaluated, capped at `tau`;
    otherwise the variable is simply initialized to `tau`.
    """
    if tau_curriculum_steps > 0:
        tau_var = tf.get_variable('tau', [],
                                  initializer=tf.constant_initializer(0.0),
                                  trainable=False)
        tau_var = tau_var.assign(
            tf.minimum(float(tau), tau_var + float(tau) / tau_curriculum_steps))
    else:
        tau_var = tf.get_variable('tau', [],
                                  initializer=tf.constant_initializer(float(tau)),
                                  trainable=False)
    return tau_var
def des_descrypt(s):
    """
    DES decryption.

    :param s: encrypted string, hex encoded
    :return: decrypted string
    """
    # The shared constant serves as both key and IV (CBC mode, PKCS5 padding).
    iv = constants.gk
    k = des(iv, CBC, iv, pad=None, padmode=PAD_PKCS5)
    # print binascii.b2a_hex(s)
    de = k.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5)
    print de
    return de
import zipfile
import io
def ghg_call(url, response, args):
    """
    Callback function for the US GHG Emissions download. Open the downloaded zip file and
    read the contained CSV(s) into pandas dataframe(s).

    :param url: download URL; the substring 'annex' selects the annex table
        set (ANNEX_TABLES) instead of the chapter tables (TABLES)
    :param response: HTTP response whose ``content`` holds the zip archive
    :param args: dict; ``args['year']`` selects which year's column is kept
    :return: list of pandas DataFrames, one per parsed table, each tagged
        with a ``SourceName`` column of the form ``EPA_GHG_Inventory_T_<table>``
    """
    df = None
    year = args['year']
    with zipfile.ZipFile(io.BytesIO(response.content), "r") as f:
        frames = []
        # TODO: replace this TABLES constant with kwarg['tables']
        if 'annex' in url:
            is_annex = True
            t_tables = ANNEX_TABLES
        else:
            is_annex = False
            t_tables = TABLES
        for chapter, tables in t_tables.items():
            for table in tables:
                # path = os.path.join("Chapter Text", chapter, f"Table {table}.csv")
                # Annex CSVs live directly under "Annex/"; chapter CSVs under
                # "Chapter Text/<chapter>/".
                if is_annex:
                    path = f"Annex/Table {table}.csv"
                else:
                    path = f"Chapter Text/{chapter}/Table {table}.csv"
                data = f.open(path)
                if table not in SPECIAL_FORMAT:
                    # Common case: two header rows to skip, commas as thousands separators.
                    df = pd.read_csv(data, skiprows=2, encoding="ISO-8859-1", thousands=",")
                elif '3-' in table:
                    # Skip first two rows, as usual, but make headers the next 3 rows:
                    df = pd.read_csv(data, skiprows=2, encoding="ISO-8859-1", header=[0, 1, 2], thousands=",")
                    # The next two rows are headers and the third is units:
                    # Flatten the 3-level MultiIndex header into single strings,
                    # skipping the pandas "Unnamed: ..." padding levels.
                    new_headers = []
                    for col in df.columns:
                        # unit = col[2]
                        new_header = 'Unnamed: 0'
                        if 'Unnamed' not in col[0]:
                            if 'Unnamed' not in col[1]:
                                new_header = f'{col[0]} {col[1]}'
                            else:
                                new_header = col[0]
                            if 'Unnamed' not in col[2]:
                                new_header += f' {col[2]}'
                            # unit = col[2]
                        elif 'Unnamed' in col[0] and 'Unnamed' not in col[2]:
                            new_header = col[2]
                        new_headers.append(new_header)
                    df.columns = new_headers
                    # NOTE(review): leftover debug print below — confirm whether
                    # it should be removed.
                    print('break')
                elif '4-' in table:
                    df = pd.read_csv(data, skiprows=2, encoding="ISO-8859-1", thousands=",", decimal=".")
                elif 'A-' in table:
                    if table == 'A-17':
                        # A-17 is similar to T 3-23, the entire table is 2012 and headings are completely different.
                        if str(year) == '2012':
                            df = pd.read_csv(data, skiprows=2, encoding="ISO-8859-1", header=[0, 1], thousands=",")
                            new_headers = []
                            header_grouping = ''
                            for col in df.columns:
                                if 'Unnamed' in col[0]:
                                    # new_headers.append(f'{header_grouping}{col[1]}')
                                    new_headers.append(f'{fix_a17_headers(col[1])}{header_grouping}')
                                else:
                                    if len(col) == 2:
                                        # header_grouping = f'{col[0]}__'
                                        if col[0] == A_17_TBTU_HEADER[0]:
                                            header_grouping = f' {A_17_TBTU_HEADER[1].strip()}'
                                        else:
                                            header_grouping = f' {A_17_CO2_HEADER[1].strip()}'
                                    # new_headers.append(f'{header_grouping}{col[1]}')
                                    new_headers.append(f'{fix_a17_headers(col[1])}{header_grouping}')
                            df.columns = new_headers
                            nan_col = 'Electricity Power Emissions (MMT CO2 Eq.) from Energy Use'
                            fill_col = 'Unnamed: 12_level_1 Emissions (MMT CO2 Eq.) from Energy Use'
                            # NOTE(review): positional axis argument (drop(..., 1))
                            # is deprecated in newer pandas; consider axis=1.
                            df = df.drop(nan_col, 1)
                            df.columns = [nan_col if x == fill_col else x for x in df.columns]
                            df['Year'] = year
                    else:
                        df = pd.read_csv(data, skiprows=1, encoding="ISO-8859-1", thousands=",", decimal=".")
                if df is not None and len(df.columns) > 1:
                    # Keep only the requested year's column: drop every other
                    # year plus the known junk columns.
                    years = YEARS.copy()
                    years.remove(str(year))
                    df = df.drop(columns=(DROP_COLS + years), errors='ignore')
                    # Assign SourceName now while we still have access to the table name:
                    source_name = f"EPA_GHG_Inventory_T_{table.replace('-', '_')}"
                    df["SourceName"] = source_name
                    frames.append(df)
    # return pd.concat(frames)
    return frames
def get_compound_coeff_func(phi=1.0, max_cost=2.0):
    """
    Cost function from the EfficientNets paper
    to compute candidate values for alpha, beta
    and gamma parameters respectively.
    These values are then used to train models,
    and the validation accuracy is used to select
    the best base parameter set at phi = 1.

    # Arguments:
        phi: The base power of the parameters. Kept as 1
            for initial search of base parameters.
        max_cost: The maximum cost of permissible. User
            defined constant generally set to 2.

    # Returns:
        A function which accepts a numpy vector of 3 values,
        and computes the mean squared error between the
        `max_cost` value and the cost computed as
        `cost = x[0] * (x[1] ** 2) * (x[2] ** 2)`.

    # References:
        - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
    """
    def compound_coeff(x, phi=phi, max_cost=max_cost):
        # x holds [alpha, beta, gamma] = (depth, width, resolution)
        # multipliers.  Scale each by the power phi (phi is generally
        # kept at 1.0 during the base-parameter search).
        alpha = x[0] ** phi
        beta = x[1] ** phi
        gamma = x[2] ** phi
        # EfficientNet compound-scaling cost: depth contributes linearly
        # to FLOPs, width and resolution quadratically.
        cost = alpha * (beta ** 2) * (gamma ** 2)
        return (cost - max_cost) ** 2
    return compound_coeff
import re
def get_kver_bin(path, split=False, proc=None):
    """
    Get version of a kernel binary at 'path'. The 'split' and 'proc' arguments are the same as in
    'get_kver()'.
    """
    if not proc:
        proc = Procs.Proc()

    cmd = f"file -- {path}"
    stdout = proc.run_verify(cmd)[0].strip()
    msg = f"ran this command: {cmd}, got output:\n{stdout}"

    # 'file' must recognise the target as a Linux kernel image.
    if not re.match(r".* Linux kernel.* executable .*", stdout):
        raise Error(f"file at '{path}'{proc.hostmsg} is not a Linux kernel binary file\n{msg}")

    # Pull the version token out of the 'file' description.
    version_match = re.match(r".* version ([^ ]+) .*", stdout)
    if not version_match:
        raise Error(f"{msg}\nFailed to find kernel version in the output.")

    kver = version_match.group(1)
    return split_kver(kver) if split else kver
from datetime import datetime
def post_apply_become_provider_apply_id_accept(request: HttpRequest, apply_id, **kwargs) -> JsonResponse:
    """Approve an application to become a device provider.

    Promotes the applicant into the ``provider`` group, marks the
    application as approved (recording handler, time and reason), then
    notifies the applicant by system message and e-mail.

    :param request: the HTTP request; POST may carry ``handle_reason``
    :type request: HttpRequest
    :param apply_id: identifier of the permission application
    :param kwargs: extra arguments
    :type kwargs: Dict
    :return: JSON success payload, or an error (303: not found,
        304: already handled)
    :rtype: JsonResponse
    """
    reason: str = request.POST.get('handle_reason', '')
    matched: QuerySet = PermApply.objects.filter(apply_id=apply_id)
    if len(matched) == 0:
        return JsonResponse(common.create_error_json_obj(303, '该申请不存在'), status=400)
    perm_apply: PermApply = matched.first()
    if perm_apply.status != common.PENDING:
        return JsonResponse(common.create_error_json_obj(304, '该申请已处理'), status=400)
    # Promote the applicant before mutating the application record.
    target_user: User = perm_apply.applicant
    target_user.change_group('provider')
    target_user.save()
    perm_apply.status = common.APPROVED
    perm_apply.handler = request.user
    perm_apply.handle_time = int(datetime.now(timezone.utc).timestamp())
    perm_apply.handle_reason = reason
    perm_apply.save()
    # Notify the applicant through both the in-app PM system and e-mail.
    pm.send_system_message_to_by_user(perm_apply.applicant, common.PM_IMPORTANT,
                                      common.create_prem_apply_handle_message(common.APPROVED))
    mail.send_perm_apply_accept(target_user.email, perm_apply)
    return common.create_success_json_res_with({})
import tokenize
def get_var_info(line, frame):
    """Given a line of code and a frame object, it obtains the
    value (repr) of the names found in either the local or global scope.

    Names are looked up locals-first; each distinct name is reported at
    most once, and names found in neither scope are skipped.
    """
    loc = frame.f_locals
    glob = frame.f_globals
    seen = []
    names_info = []
    for tok in utils.tokenize_source(line):
        if tok.type != tokenize.NAME:
            continue
        name = tok.string
        if name in seen:
            continue
        seen.append(name)
        if name in loc:
            info = format_var_info(tok, loc)
        elif name in glob:
            info = format_var_info(tok, glob, _global=True)
        else:
            info = ""
        if info:
            names_info.append(info)
    if names_info:
        # Trailing empty entry gives the joined string a final newline.
        names_info.append("")
    return "\n".join(names_info)
import operator
def filter_events_for_client(store, user_id, events, is_peeking=False,
                             always_include_ids=frozenset()):
    """
    Check which events a user is allowed to see

    Args:
        store (synapse.storage.DataStore): our datastore (can also be a worker
            store)
        user_id(str): user id to be checked
        events(list[synapse.events.EventBase]): sequence of events to be checked
        is_peeking(bool): should be True if:
          * the user is not currently a member of the room, and:
          * the user has not been a member of the room since the given
            events
        always_include_ids (set(event_id)): set of event ids to specifically
            include (unless sender is ignored)

    Returns:
        Deferred[list[synapse.events.EventBase]]

    NOTE(review): this generator uses ``yield`` on Deferreds plus
    ``defer.returnValue``, so it must be wrapped as a Twisted
    ``inlineCallbacks`` coroutine — the decorator is presumably applied
    where it is defined; confirm.
    """
    # Filter out events that have been soft failed so that we don't relay them
    # to clients.
    events = list(e for e in events if not e.internal_metadata.is_soft_failed())
    # Only the room's history-visibility state and this user's own
    # membership are needed to decide visibility per event.
    types = (
        (EventTypes.RoomHistoryVisibility, ""),
        (EventTypes.Member, user_id),
    )
    event_id_to_state = yield store.get_state_for_events(
        frozenset(e.event_id for e in events),
        state_filter=StateFilter.from_types(types),
    )
    ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
        "m.ignored_user_list", user_id,
    )
    # FIXME: This will explode if people upload something incorrect.
    ignore_list = frozenset(
        ignore_dict_content.get("ignored_users", {}).keys()
        if ignore_dict_content else []
    )
    # Senders who requested erasure get their events redacted (GDPR).
    erased_senders = yield store.are_users_erased((e.sender for e in events))
    def allowed(event):
        """
        Args:
            event (synapse.events.EventBase): event to check

        Returns:
            None|EventBase:
               None if the user cannot see this event at all

               a redacted copy of the event if they can only see a redacted
               version

               the original event if they can see it as normal.
        """
        # Non-state events from ignored users are hidden outright.
        if not event.is_state() and event.sender in ignore_list:
            return None
        if event.event_id in always_include_ids:
            return event
        state = event_id_to_state[event.event_id]
        # get the room_visibility at the time of the event.
        visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility", "shared")
        else:
            visibility = "shared"
        if visibility not in VISIBILITY_PRIORITY:
            visibility = "shared"
        # Always allow history visibility events on boundaries. This is done
        # by setting the effective visibility to the least restrictive
        # of the old vs new.
        if event.type == EventTypes.RoomHistoryVisibility:
            prev_content = event.unsigned.get("prev_content", {})
            prev_visibility = prev_content.get("history_visibility", None)
            if prev_visibility not in VISIBILITY_PRIORITY:
                prev_visibility = "shared"
            new_priority = VISIBILITY_PRIORITY.index(visibility)
            old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
            if old_priority < new_priority:
                visibility = prev_visibility
        # likewise, if the event is the user's own membership event, use
        # the 'most joined' membership
        membership = None
        if event.type == EventTypes.Member and event.state_key == user_id:
            membership = event.content.get("membership", None)
            if membership not in MEMBERSHIP_PRIORITY:
                membership = "leave"
            prev_content = event.unsigned.get("prev_content", {})
            prev_membership = prev_content.get("membership", None)
            if prev_membership not in MEMBERSHIP_PRIORITY:
                prev_membership = "leave"
            # Always allow the user to see their own leave events, otherwise
            # they won't see the room disappear if they reject the invite
            if membership == "leave" and (
                prev_membership == "join" or prev_membership == "invite"
            ):
                return event
            new_priority = MEMBERSHIP_PRIORITY.index(membership)
            old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
            if old_priority < new_priority:
                membership = prev_membership
        # otherwise, get the user's membership at the time of the event.
        if membership is None:
            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                membership = membership_event.membership
        # if the user was a member of the room at the time of the event,
        # they can see it.
        if membership == Membership.JOIN:
            return event
        # otherwise, it depends on the room visibility.
        if visibility == "joined":
            # we weren't a member at the time of the event, so we can't
            # see this event.
            return None
        elif visibility == "invited":
            # user can also see the event if they were *invited* at the time
            # of the event.
            return (
                event if membership == Membership.INVITE else None
            )
        elif visibility == "shared" and is_peeking:
            # if the visibility is shared, users cannot see the event unless
            # they have *subequently* joined the room (or were members at the
            # time, of course)
            #
            # XXX: if the user has subsequently joined and then left again,
            # ideally we would share history up to the point they left. But
            # we don't know when they left. We just treat it as though they
            # never joined, and restrict access.
            return None
        # the visibility is either shared or world_readable, and the user was
        # not a member at the time. We allow it, provided the original sender
        # has not requested their data to be erased, in which case, we return
        # a redacted version.
        if erased_senders[event.sender]:
            return prune_event(event)
        return event
    # check each event: gives an iterable[None|EventBase]
    filtered_events = map(allowed, events)
    # remove the None entries
    filtered_events = filter(operator.truth, filtered_events)
    # we turn it into a list before returning it.
    defer.returnValue(list(filtered_events))
from typing import Union
from typing import Optional
def MPS_SimAddRule(event_mask: Union[IsoSimulatorEvent,
                                     FeliCaSimulatorEvent,
                                     VicinitySimulatorEvent,
                                     NfcSimulatorEvent,
                                     Type2TagSimulatorEvent],
                   delay: float, execute_count: Optional[int],
                   pattern_condition: Optional[ActionConditionDataPattern],
                   remote_command: str) -> int:
    """Adds a simulation rule

    The simulator protocol is inferred from the concrete type of
    ``event_mask``; the rule fires when any masked event occurs, optionally
    gated by a data-pattern condition.

    Parameters
    ----------
    event_mask : IsoSimulatorEvent, Type2TagSimulatorEvent, \
                 FeliCaSimulatorEvent, VicinitySimulatorEvent \
                 or NfcSimulatorEvent
        Mask of events which triggers the rule
    delay : float
        Delay between event occurrence and rule execution in s
    execute_count : int
        Rule executions count, or None if always active
    pattern_condition : ActionConditionDataPattern
        Pattern condition
    remote_command : str
        Remote command to run when the rule conditions are met

    Returns
    -------
    int
        Rule identifier
    """
    # Map the event-mask type onto the corresponding simulator protocol.
    if isinstance(event_mask, IsoSimulatorEvent):
        protocol = _SimulatorProtocol.CL_14443_SIMULATOR
    elif isinstance(event_mask, FeliCaSimulatorEvent):
        protocol = _SimulatorProtocol.CL_FELICA_SIMULATOR
    elif isinstance(event_mask, VicinitySimulatorEvent):
        protocol = _SimulatorProtocol.CL_VICINITY_SIMULATOR
    elif isinstance(event_mask, NfcSimulatorEvent):
        protocol = _SimulatorProtocol.CL_NFC_SIMULATOR
    elif isinstance(event_mask, Type2TagSimulatorEvent):
        protocol = _SimulatorProtocol.CL_TAG_TYPE2_SIMULATOR
    else:
        raise TypeError('event_mask must be an instance of '
                        'IsoSimulatorEvent IntFlag, '
                        'FeliCaSimulatorEvent IntFlag, '
                        'VicinitySimulatorEvent IntFlag, '
                        'NfcSimulatorEvent IntFlag or '
                        'Type2TagSimulatorEvent IntFlag')
    # Unit auto-selection
    computed_unit, [computed_delay] = _unit_autoselect(NfcUnit.UNIT_S, [delay])
    _check_limits(c_uint32, computed_delay, 'delay')
    if execute_count is not None:
        _check_limits(c_uint32, execute_count, 'execute_count')
        count = execute_count
    else:
        count = 0xFFFFFFFF  # Always active
    rule_id = c_uint32()
    if pattern_condition is None:
        # No condition: pass a null condition pointer and zero length.
        CTS3Exception._check_error(_MPuLib.MPS_SimAddRule(
            c_uint8(0),
            c_uint32(protocol),
            c_uint32(event_mask),
            c_uint32(computed_delay),
            c_uint32(computed_unit),
            c_uint32(count),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            None,
            c_uint32(len(remote_command)),
            remote_command.encode('ascii'),
            byref(rule_id)))
    elif isinstance(pattern_condition, ActionConditionDataPattern):
        length = c_uint32(len(pattern_condition.pattern))
        # Pad mask and pattern to the fixed 256-byte ctypes buffers.
        # NOTE(review): the pattern bytes are stored into the buffer named
        # ``mask`` (padded by the mask's length) and vice versa — this looks
        # swapped; confirm against the _ActionConditionDataPattern field
        # order before relying on it.
        temp = bytearray(pattern_condition.pattern +
                         b'\x00' * (256 - len(pattern_condition.mask)))
        mask = (c_uint8 * 256).from_buffer(temp)
        temp = bytearray(pattern_condition.mask +
                         b'\x00' * (256 - len(pattern_condition.pattern)))
        pattern = (c_uint8 * 256).from_buffer(temp)
        condition_ctypes = _ActionConditionDataPattern(length, mask, pattern)
        # 516 = 4-byte length field + two 256-byte buffers (struct size);
        # presumably what the firmware expects — confirm.
        CTS3Exception._check_error(_MPuLib.MPS_SimAddRule(
            c_uint8(0),
            c_uint32(protocol),
            c_uint32(event_mask),
            c_uint32(computed_delay),
            c_uint32(computed_unit),
            c_uint32(count),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            c_uint32(516),
            byref(condition_ctypes),
            c_uint32(len(remote_command)),
            remote_command.encode('ascii'),
            byref(rule_id)))
    else:
        raise TypeError('pattern_condition must be an instance of '
                        'ActionConditionDataPattern')
    return rule_id.value
def wr1996(size=200):
    """Generate '6d robot arm' dataset (Williams and Rasmussen 1996)

    Was originally created in order to test the correctness of the
    implementation of kernel ARD.  For full details see:
    http://www.gaussianprocess.org/gpml/code/matlab/doc/regression.html#ard

    x_1 picked randomly in [-1.932, -0.453]
    x_2 picked randomly in [0.534, 3.142]
    r_1 = 2.0
    r_2 = 1.3
    f(x_1,x_2) = r_1 cos (x_1) + r_2 cos(x_1 + x_2) + N(0,0.0025)
    etc.

    Expected relevances:
    ell_1      1.804377
    ell_2      1.963956
    ell_3      8.884361
    ell_4     34.417657
    ell_5   1081.610451
    ell_6    375.445823
    sigma_f    2.379139
    sigma_n    0.050835

    :param size: number of samples to generate
    :return: dataset with 6 features (2 informative, 2 jittered copies,
        2 pure noise) and a noisy scalar target
    """
    intervals = np.array([[-1.932, -0.453], [0.534, 3.142]])
    r = np.array([2.0, 1.3])
    # Uniform samples rescaled into the two documented intervals.
    x = np.random.rand(size, 2)
    x *= np.array(intervals[:, 1]-intervals[:, 0])
    x += np.array(intervals[:, 0])
    if __debug__:
        # Fixed: `xrange` is Python 2 only; `range` behaves identically here
        # on both interpreters.
        for i in range(2):
            debug('DG', '%d columnt Min: %g Max: %g' %
                  (i, x[:, i].min(), x[:, i].max()))
    # NOTE(review): the docstring gives f = r1*cos(x1) + r2*cos(x1 + x2), but
    # the expression below computes r1*cos(x1 + r2*cos(x1 + x2)) — the second
    # term is nested inside the first cosine. Left unchanged because the
    # documented "expected relevances" were presumably derived from this
    # exact generator; confirm before changing the parenthesisation.
    y = r[0]*np.cos(x[:, 0] + r[1]*np.cos(x.sum(1))) + \
        np.random.randn(size)*np.sqrt(0.0025)
    y -= y.mean()
    # Features 3-4: slightly jittered copies of x1, x2; features 5-6: noise.
    x34 = x + np.random.randn(size, 2)*0.02
    x56 = np.random.randn(size, 2)
    x = np.hstack([x, x34, x56])
    return dataset_wizard(samples=x, targets=y)
import json
def to_pretty_json(obj):
    """Serialize ``obj`` to a human-readable JSON string.

    Keys keep their original order; output is indented four spaces with
    ``', '`` / ``': '`` item and key separators.
    """
    pretty = json.dumps(
        obj,
        sort_keys=False,
        indent=4,
        separators=(',', ': '),
    )
    return pretty
def parse_access_token(request):
    """Extract the bearer access token from a request's Authorization header.

    Expects a header of the form ``"<scheme> <token>"`` (e.g.
    ``"Bearer abc123"``).

    :param request: request object exposing ``headers.get``
    :return: the token portion of the header, or ``None`` when the header
        is missing or malformed
    """
    try:
        auth_header = request.headers.get('Authorization')
        return auth_header.split(" ")[1]
    except (AttributeError, IndexError):
        # AttributeError: header absent (None has no .split);
        # IndexError: header present but has no token part.
        return None
def postscriptWeightNameFallback(info):
    """
    Fallback to the closest match of the *openTypeOS2WeightClass*
    in this table:

    === ===========
    100 Thin
    200 Extra-light
    300 Light
    400 Normal
    500 Medium
    600 Semi-bold
    700 Bold
    800 Extra-bold
    900 Black
    === ===========
    """
    weight_class = getAttrWithFallback(info, "openTypeOS2WeightClass")
    # Snap to the nearest multiple of 100, then clamp into [100, 900].
    weight_class = int(round(weight_class * .01) * 100)
    weight_class = min(max(weight_class, 100), 900)
    return _postscriptWeightNameOptions[weight_class]
def down_spatial(in_planes, out_planes):
    """Downsample a 21x21 feature map to 5x5: (21 - 5) // 4 + 1 = 5."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=4)
    norm = nn.BatchNorm2d(out_planes)
    return nn.Sequential(conv, norm)
def add_integral_control(
        plant, regulator=None, integrator_ugf=None,
        integrator_time_constant=None, **kwargs):
    """Match and returns an integral gain.

    Finds the integral gain such that the integral control's unity gain
    frequency matches the specified target.  Priority of specification:
    ``integrator_time_constant`` (its inverse is used as the UGF in Hz),
    then ``integrator_ugf``, then the first UGF of the open loop
    ``plant * regulator``.

    Parameters
    ----------
    plant : TransferFunction
        The transfer function representation of the system to be feedback
        controlled.
    regulator : TransferFunction, optional
        The pre-regulator; used to derive the target UGF when neither
        ``integrator_ugf`` nor ``integrator_time_constant`` is given.
    integrator_ugf : float, optional
        The unity gain frequency (Hz) of the integral control
        (the inverse of the integration time constant).
    integrator_time_constant : float, optional
        The integration time constant (s); overrides ``integrator_ugf``.

    Returns
    -------
    ki : float
        The integral control gain.
    """
    s = control.tf("s")
    # Open-loop integrator shape: DC gain of the plant divided by s.
    oltf_int = 1/s * plant.dcgain()
    if integrator_time_constant is not None:
        integrator_ugf = 1/integrator_time_constant
    if integrator_ugf is not None:
        # UGF specified in Hz -> evaluate at 2*pi*f rad/s.
        return 1 / abs(oltf_int(1j*2*np.pi*integrator_ugf))
    if regulator is not None:
        # Match the lowest open-loop UGF (already in rad/s).
        oltf = plant * regulator
        _, _, _, _, ugf, _ = control.stability_margins(
            oltf, returnall=True)
        return 1 / abs(oltf_int(1j*min(ugf)))
    raise ValueError("At least one of regulator, integrator_ugf, or "
                     "integrator_time_constant must be specified.")
def script():
    """Serve the settings page's rendered Javascript."""
    body = render_template("settings/settings.js")
    return Response(
        response=body,
        mimetype="application/javascript",
        status=200,
    )
def repr_helper(tuple_gen_exp, ind=2):
    """Format a sequence of (key, value) 2-tuples as aligned, sorted lines.

    .. code_block:: python

        (1, 'hi'), (2, 'there'), (40, 'you') ->

    .. code_block:: python

        [ 1] : hi
        [ 2] : there
        [40] : you
    """
    pairs = list(tuple_gen_exp)
    if not pairs:
        return " " * ind + "(empty)"
    # Pad every key to the widest key's string length so the ']' align.
    width = max(len(str(pair[0])) for pair in pairs)
    return "\n".join(
        " " * ind + "[%s] : %s" % (lpad(key, width), value)
        for key, value in sorted(pairs))
from IPython import get_ipython
def in_ipython() -> bool:
    """Return true if we're running in an IPython interactive shell."""
    try:
        shell_name = get_ipython().__class__.__name__
    except Exception:
        # get_ipython missing or failing means no interactive shell.
        return False
    return shell_name == 'TerminalInteractiveShell'
def demo_loss_accuracy_curve():
    """Make a demo loss-accuracy curve figure."""
    # Noisy, exponentially decaying training loss bottoming out at 0.2.
    train_steps = np.arange(101)
    train_loss = np.exp(-train_steps * 0.1) * 20. + np.random.normal(size=101) * 2.
    train_loss = train_loss - np.min(train_loss) + .2
    # Validation metrics sampled every 10 steps.
    val_steps = np.arange(0, 101, 10)
    val_loss = (np.exp(-val_steps * 0.1) * 25. +
                np.random.normal(size=11) * 0.5)
    val_loss = val_loss - np.min(val_loss)
    val_acc = np.exp(-val_loss * 0.1)
    primary_metrics = [
        {'name': 'loss', 'steps': train_steps, 'values': train_loss},
        {'name': 'valid loss', 'steps': val_steps, 'values': val_loss},
    ]
    secondary = [
        {'name': 'valid acc', 'steps': val_steps, 'values': val_acc},
    ]
    return Section(
        'Training Metrics',
        loss_accuracy_curve(
            metrics=primary_metrics,
            secondary_metrics=secondary,
            title='Training Loss & Validation Loss / Accuracy'
        )
    )
import fbuild.builders
import os
import platform
def guess_platform(ctx, arch=None):
    """L{guess_platform} returns a platform set that describes the various
    features of the specified I{platform}. If I{platform} is I{None}, try to
    determine which platform the system is and return that value. If the
    platform cannot be determined, raise L{UnknownPlatform}."""
    ctx.logger.check('determining platform')
    if arch is None:
        # On Windows there is no point in trying uname.
        if os.name == 'nt':
            res = archmap[platform.system().lower()]
            ctx.logger.passed(res)
            return frozenset(res)
        try:
            uname = fbuild.builders.find_program(ctx, ['uname'], quieter=1)
        except fbuild.builders.MissingProgram:
            # No uname available: fall back to Python's idea of the platform.
            arch = platform.system().lower()
        else:
            try:
                stdout, stderr = ctx.execute((uname, '-s'), quieter=1)
            except fbuild.ExecutionError:
                # uname exists but failed: fall back to Python again.
                arch = platform.system().lower()
            else:
                arch = stdout.decode('utf-8').strip().lower()
                # Normalize the MSYS/Cygwin uname spellings.
                if arch.startswith('mingw32'):
                    arch = 'mingw'
                elif arch.startswith('cygwin'):
                    arch = 'cygwin'
    try:
        architecture = archmap[arch]
    except KeyError:
        ctx.logger.failed()
        raise UnknownPlatform(arch)
    ctx.logger.passed(architecture)
    return frozenset(architecture)
def text_filter(sentence:str)-> str:
"""
过滤掉非汉字和标点符号和非数字
:param sentence:
:return:
"""
line = sentence.replace('\n', '。')
# 过滤掉非汉字和标点符号和非数字
linelist = [word for word in line if
word >= u'\u4e00' and word <= u'\u9fa5' or word in [',', '。', '?', '!',
':'] or word.isdigit()]
return ''.join(linelist) | 9c0949b2e9b374f1aa5392b5a4c215ebff21171b | 25,761 |
import math
def get_lr_schedule(base_lr, global_batch_size, base_batch_size=None,
                    scaling=None, n_warmup_epochs=0, warmup_factor=-1,
                    decay_schedule=None, is_root=True):
    """Get the learning rate schedule function.

    Args:
        base_lr: learning rate at the reference ``base_batch_size``.
        global_batch_size: total batch size across all workers.
        base_batch_size: reference batch size the LR scaling is relative to.
        scaling: 'linear', 'sqrt' or None -- how ``base_lr`` is scaled with
            batch size.
        n_warmup_epochs: number of linear warmup epochs.
        warmup_factor: starting LR as a fraction of the peak LR; a negative
            value means warmup starts from ``base_lr`` itself.
        decay_schedule: dict with a 'name' key ('step', ...) plus the
            schedule's parameters; defaults to an empty schedule.
        is_root: if True, emit MLPerf hyperparameter log events.

    Returns:
        A callable mapping epoch index to learning rate.
    """
    # Fixed: mutable default argument ({}) replaced by a None sentinel.
    if decay_schedule is None:
        decay_schedule = {}
    if scaling == 'linear':
        scale_factor = global_batch_size / base_batch_size
    elif scaling == 'sqrt':
        scale_factor = math.sqrt(global_batch_size / base_batch_size)
    else:
        scale_factor = 1.
    peak_lr = base_lr * scale_factor
    init_lr = peak_lr * warmup_factor if warmup_factor >= 0 else base_lr
    # MLPerf logging
    # NOTE: there is currently a confusing mismatch between the parameter
    # naming convention in this implementation and MLPerf's hyperparameter
    # conventions. Here we define base LR to be the LR at a baseline batch
    # size and the "peak" LR to be the value scaled according to current batch
    # size. We will leave things as-is for now.
    if is_root:
        mllogger = mllog.get_mllogger()
        mllogger.event(key=mllog.constants.OPT_BASE_LR, value=peak_lr)
        mllogger.event(key=mllog.constants.OPT_LR_WARMUP_EPOCHS, value=n_warmup_epochs)
        mllogger.event(key=mllog.constants.OPT_LR_WARMUP_FACTOR,
                       value=warmup_factor if warmup_factor >= 0 else init_lr / peak_lr)
        decay_name = decay_schedule['name']
        if decay_name == 'step':
            # All remaining keys are epoch boundaries mapped to decay factors.
            decay_steps = decay_schedule.copy()
            decay_steps.pop('name')
            mllogger.event(key=mllog.constants.OPT_LR_DECAY_BOUNDARY_EPOCHS,
                           value=sorted(decay_steps.keys()))
            mllogger.event(key=mllog.constants.OPT_LR_DECAY_FACTOR,
                           value=max(decay_steps.values()) if len(decay_steps) > 0 else 1)
    return partial(_lr_schedule, init_lr=init_lr, peak_lr=peak_lr,
                   n_warmup_epochs=n_warmup_epochs,
                   decay_schedule=decay_schedule)
import math
def stdp_values(values, period=None):
    """Returns list of running population standard deviations.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed population standard deviations.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = stdp_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['0.00', '2.00', '2.16', '2.16', '3.68', '5.44', '5.56']
    """
    # Population standard deviation = sqrt of each running population variance.
    variances = _varbases(values, period, population=True)
    return [math.sqrt(variance) for variance in variances]
def json_serialize(item):
    """
    A function similar to L{dumps}, additionally able to serialize
    L{PlatedElement} instances via their C{_asJSON} hook.
    """
    def _default(unknown):
        # Fallback encoder handed to dumps() for non-JSON-native objects.
        if isinstance(unknown, PlatedElement):
            return unknown._asJSON()
        raise TypeError("{input} not JSON serializable"
                        .format(input=unknown))
    return dumps(item, default=_default)
def get_sop_instance_uid(dicom):
    """Return the SOP Instance UID.

    Args:
        dicom: pydicom Dataset

    Returns:
        The value of the dataset's ``SOPInstanceUID`` attribute.
    """
    return getattr(dicom, 'SOPInstanceUID')
def remove_filter():
    """
    Removes a filter from the process

    Returns
    -------------
    dictio
        Success, or not
    """
    # reads the session
    session = request.args.get('session', type=str)
    # reads the requested process name
    process = request.args.get('process', default='receipt', type=str)
    # Guard clauses: both a valid session and log visibility are required.
    if not check_session_validity(session):
        return jsonify({"status": "FAIL"})
    user = get_user_from_session(session)
    if not lh.check_user_log_visibility(user, process):
        return jsonify({"status": "FAIL"})
    # reads the specific filter to remove
    filter = request.json['filter']
    # reads all the filters
    all_filters = request.json['all_filters']
    new_handler = lh.get_handler_for_process_and_session(process, session).remove_filter(filter, all_filters)
    lh.set_handler_for_process_and_session(process, session, new_handler)
    return jsonify({"status": "OK"})
def flag_data(vis_windows, flag_windows):
    """Returns ``flag_windows`` untouched, as a dask blockwise operation."""
    def _passthrough(vis_windows, flag_windows):
        # Placeholder flagger: no additional data is flagged.
        return flag_windows
    return da.blockwise(_passthrough, _WINDOW_SCHEMA,
                        vis_windows, _WINDOW_SCHEMA,
                        flag_windows, _WINDOW_SCHEMA,
                        dtype=vis_windows.dtype)
def preprocess_static_feature(
    static_feature_dict,
    imputation_strategy="median",
    standardize=False
):
    """Preprocessing for a dictionary of static features.

    Missing (None) values are imputed; optionally the values are
    MinMax-scaled.  If every value is missing, or (for scaling) fewer than
    two distinct values exist, the features are zeroed out instead.

    Args:
        static_feature_dict: Dictionary of float values (or None).
        imputation_strategy: "median", "mean", "most_frequent" or "constant".
        standardize: If true, apply MinMax scaling.

    Returns:
        Tuple of (preprocessed feature dict, fitted scaler or None).

    Raises:
        ValueError: if ``imputation_strategy`` is not supported.
    """
    if imputation_strategy not in ["mean", "median", "most_frequent", "constant"]:
        raise ValueError(
            "Given imputation strategy {} is not supported. "
            "Use value from ['mean', 'median', 'most_frequent', 'constant'] "
            "as an imputation strategy.".format(imputation_strategy))
    keys = list(static_feature_dict.keys())
    feature_values = np.empty([len(keys), 1])
    for idx, key in enumerate(keys):
        raw = static_feature_dict[key]
        feature_values[idx] = raw if raw is not None else np.nan
    if imputation_strategy:
        if np.all(np.isnan(feature_values)):
            # Nothing to impute from: fall back to all zeros.
            feature_values = np.zeros_like(feature_values)
        else:
            imputer = SimpleImputer(
                missing_values=np.nan, strategy=imputation_strategy)
            feature_values = imputer.fit_transform(feature_values)
    scaler = None
    if standardize:
        if np.unique(feature_values).size < 2:
            # MinMax scaling is degenerate with < 2 distinct values.
            feature_values = np.zeros_like(feature_values)
        else:
            scaler = preprocessing.MinMaxScaler()
            feature_values = scaler.fit_transform(feature_values)
    return {key: feature_values[i, 0] for i, key in enumerate(keys)}, scaler
import time
import gzip
import six
import sys
import io
from io import StringIO
import csv
def auto_sampler(dataset, encoding, ui):
    """
    Automatically find an appropriate number of rows to send per batch based
    on the average row size.

    Reads a fixed-size sample from the (possibly gzipped) CSV at ``dataset``,
    measures the average parsed CSV row size, and derives how many rows fit
    into the per-batch size goal.

    :param dataset: path to the CSV file; '.gz' suffix triggers gzip opening
    :param encoding: text encoding of the file (falls back to utf-8 for
        size measurement)
    :param ui: logger-like object with info/debug/fatal methods
    :return: recommended number of rows per batch
    """
    t0 = time()
    sample_size = AUTO_SAMPLE_SIZE
    is_gz = dataset.endswith('.gz')
    # NOTE(review): the 'rU' text mode is removed in Python 3.11+ — confirm.
    opener, mode = (gzip.open, 'rb') if is_gz else (open, 'rU')
    with opener(dataset, mode) as dfile:
        sample = dfile.read(sample_size)
    if six.PY3 and not is_gz:
        sample = sample.encode(encoding or 'utf-8')
    ingestable_sample = sample.decode(encoding)
    size_bytes = sys.getsizeof(ingestable_sample.encode('utf-8'))
    if size_bytes < (sample_size * 0.75):
        # if dataset is tiny, don't bother auto sampling.
        ui.info('auto_sampler: total time seconds - {}'.format(time() - t0))
        ui.info('auto_sampler: defaulting to {} samples for small dataset'
                .format(AUTO_SMALL_SAMPLES))
        return AUTO_SMALL_SAMPLES
    if six.PY3:
        buf = io.StringIO()
        buf.write(ingestable_sample)
    else:
        # NOTE(review): this branch relies on the Python 2 StringIO *module*;
        # only 'from io import StringIO' is visible here — confirm the
        # module import exists for py2 runs.
        buf = StringIO.StringIO()
        buf.write(sample)
    buf.seek(0)
    file_lines, csv_lines = 0, 0
    dialect = csv.get_dialect('dataset_dialect')
    fd = Recoder(buf, encoding)
    reader = csv.reader(fd, dialect=dialect, delimiter=dialect.delimiter)
    # Record the end-of-line seek positions so truncation/recovery below can
    # reason about where the sample was cut.
    line_pos = []
    for _ in buf:
        file_lines += 1
        line_pos.append(buf.tell())
    # remove the last line since it's probably not fully formed
    try:
        buf.truncate(line_pos[-2])
        buf.seek(0)
        file_lines -= 1
    except IndexError:
        # PRED-1240 there's no guarantee that we got _any_ fully formed lines.
        # If so, the dataset is super wide, so we only send 10 rows at a time
        return AUTO_SAMPLE_FALLBACK
    try:
        for _ in reader:
            csv_lines += 1
    except csv.Error:
        # A parse error right at the truncation point is expected; anywhere
        # else means the file itself is unparsable.
        if buf.tell() in line_pos[-3:]:
            ui.debug('auto_sampler: caught csv.Error at end of sample. '
                     'seek_position: {}, csv_line: {}'.format(buf.tell(),
                                                              line_pos))
        else:
            ui.fatal('--auto_sample failed to parse the csv file. Try again '
                     'without --auto_sample. seek_position: {}, '
                     'csv_line: {}'.format(buf.tell(), line_pos))
            raise
    else:
        ui.debug('auto_sampler: analyzed {} csv rows'.format(csv_lines))
    buf.close()
    avg_line = int(size_bytes / csv_lines)
    chunk_size_goal = AUTO_GOAL_SIZE # size we want per batch
    lines_per_sample = int(chunk_size_goal / avg_line) + 1
    ui.debug('auto_sampler: lines counted: {}, avgerage line size: {}, '
             'recommended lines per sample: {}'.format(csv_lines, avg_line,
                                                       lines_per_sample))
    ui.info('auto_sampler: total time seconds - {}'.format(time() - t0))
    return lines_per_sample
from typing import List, Optional
def from_emso(platform_code: str, parameters: Optional[List[str]] = None, start_time: str='',
              end_time: str='', depth_min: float=None, depth_max: float=None,
              user: str='', password: str='', size: int=10, token: str=''
              ) -> WaterFrame:
    """
    Get a WaterFrame with the data of the EMSO API (api.emso.eu).
    Parameters
    ----------
    user: str
        Login for the EMSO ERIC API.
    password: str
        Password for the EMSO ERIC API.
    token: str
        Token for the EMSO ERIC API.
    platform_code: str
        Data filtered by platform_code
    parameters: List[str]
        List of parameters to get data. Defaults to no parameters (empty
        result). Previously declared as a mutable default ``[]``.
    start_time: str
        First date of the measurement
    end_time: str
        Last date of the measurement
    depth_min: float
        Minimum depth of the measurement
    depth_max: float
        Maximum depth of the measurement
    size: int
        Number of values
    Returns
    -------
    wf: WaterFrame object
    """
    # Avoid the mutable-default-argument pitfall; None means "no parameters".
    parameters = parameters or []
    # Login to EMSO
    emso = EMSO(user=user, password=password, token=token)
    # Get metadata
    metadata_list = emso.get_metadata(platform_codes=[platform_code])
    # Get data, one API request per requested parameter
    frames = []
    for parameter in parameters:
        data_list = emso.get_data(platform_codes=[platform_code], parameters=[parameter],
                                  start_time=start_time, end_time=end_time, depth_min=depth_min,
                                  depth_max=depth_max, size=size, sort='asc')
        if not data_list:
            continue
        data = pd.DataFrame(data_list)
        data = data.rename(
            columns={
                'time': 'TIME',
                'time_qc': 'TIME_QC',
                'value': parameter,
                'value_qc': f'{parameter}_QC',
                'depth': 'DEPTH',
                'depth_qc': 'DEPTH_QC'
            })
        # Drop columns that duplicate the platform-level metadata.
        for column in ('parameter', 'metadata_id', 'platform_code', 'institution',
                       'area', 'long_name', 'units', 'location', 'location_qc'):
            del data[column]
        data['TIME'] = pd.to_datetime(data['TIME'])
        data.set_index(['DEPTH', 'TIME'], inplace=True)
        data = data.astype(float)
        frames.append(data)
    # DataFrame.append was removed in pandas 2.0; concatenate collected frames instead.
    complete_data = pd.concat(frames) if frames else pd.DataFrame()
    wf = WaterFrame()
    if metadata_list:
        wf.metadata = metadata_list[0]
    wf.data = complete_data.copy()
    for parameter in wf.parameters:
        found = False
        for metadata_parameter in wf.metadata['parameters']:
            # Best-effort parsing of 'ACRONYM - long name [units]' strings.
            try:
                acronym = metadata_parameter.split(' - ')[0].strip()
                long_name = metadata_parameter.split(' - ')[1].strip().split('[')[0].strip()
                units = metadata_parameter.split(' - ')[1].strip().split('[')[1][:-1]
                if parameter == acronym:
                    wf.vocabulary[acronym] = {'long_name': long_name, 'units': units}
                    found = True
                    break
            except Exception:
                # Malformed vocabulary entries are skipped on purpose.
                pass
        if not found:
            wf.vocabulary[parameter] = {'long_name': '', 'units': ''}
    return wf
def _get_all_pivots(Ao, number_of_subsamples):
"""
A dummy case where we return all the subsamples.
"""
return np.arange(1, len(number_of_subsamples)) | 886a32ac34f0eb3acafa15917258e87b68b7323c | 25,771 |
def clang_find_var(tu, name, ts, namespace=None, filename=None, onlyin=None):
    """Find the node for a given var.

    Returns the unique matching declaration (preferring its definition when
    available) and raises ValueError when zero or multiple matches are found.
    """
    assert isinstance(name, basestring)
    # NOTE(review): this looks up ENUM_DECL cursors even though the function is
    # named *var* — mirrors the original behavior; confirm the intended kinds.
    kinds = CursorKind.ENUM_DECL,
    decls = clang_find_decls(tu, name, kinds=kinds, onlyin=onlyin, namespace=namespace)
    decls = list(set(c.get_definition() or c for c in decls))  # Use definitions if available
    if len(decls)==1:
        return decls[0]
    # Nothing found, time to complain
    where = clang_where(namespace, filename)
    if not decls:
        raise ValueError("var '{0}' could not be found{1}".format(name, where))
    else:
        # BUGFIX: the format indices were swapped ("({2} times) {1}"), which
        # printed the location where the count belongs and vice versa.
        raise ValueError("var '{0}' found more than once ({1} times){2}".format(name, len(decls), where))
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
  """Returns whether a test should be run on an instance's internal IP.
  Based on the command line flag --ip_addresses. Internal IP addresses are used
  when:
  * --ip_addresses=BOTH or --ip-addresses=INTERNAL
  * --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
    internal IP.
  Args:
    sending_vm: VirtualMachine. The client.
    receiving_vm: VirtualMachine. The server.
  Returns:
    Whether a test should be run on an instance's internal IP.
  """
  subset = FLAGS.ip_addresses
  if subset in (IpAddressSubset.BOTH, IpAddressSubset.INTERNAL):
    return True
  # REACHABLE only counts when the client can actually reach the server.
  return (subset == IpAddressSubset.REACHABLE and
          sending_vm.IsReachable(receiving_vm))
def shn_get_crud_string(tablename, name):
    """ Get the CRUD strings for a table """
    # Fall back to the generic string set when the table has no custom one.
    table_strings = s3.crud_strings.get(tablename, s3.crud_strings)
    default = s3.crud_strings.get(name)
    return table_strings.get(name, default)
def default_invalid_token_callback(error_string):
    """
    By default, if an invalid token attempts to access a protected endpoint, we
    return the error string for why it is not valid with a 422 status code
    :param error_string: String indicating why the token is invalid
    """
    body = {config.error_msg_key: error_string}
    return jsonify(body), 422
def bf_generate_dataplane(snapshot=None, extra_args=None):
    # type: (Optional[str], Optional[Dict[str, Any]]) -> str
    """Generates the data plane for the supplied snapshot. If no snapshot argument is given, uses the last snapshot initialized.

    :param snapshot: name of the snapshot to use; ``None`` falls back to the
        session's most recently initialized snapshot.
    :param extra_args: additional keyword arguments forwarded verbatim to the
        underlying session call.
    :return: the result of ``bf_session.generate_dataplane`` (declared ``str``).
    """
    return bf_session.generate_dataplane(snapshot=snapshot, extra_args=extra_args)
def k_correction_pl(redshift, a_nu):
    """Calculate the k-correction for a power law spectrum with spectral
    index (per frequency) a_nu.
    :param redshift: Cosmological redshift of the source
    :type redshift: float
    :param a_nu: Power law index (per frequency)
    :type a_nu: float
    :return: K-correction
    :rtype: float
    """
    # Hogg 1999, eq. 27: K = -2.5 (1 + a_nu) log10(1 + z), in magnitudes.
    log_term = np.log10(1. + redshift)
    return -2.5 * (1. + a_nu) * log_term * units.mag
def make_points_image(pts, mask, radius=5):
    """
    Create label image from physical space points
    Creates spherical points in the coordinate space of the target image based
    on the n-dimensional matrix of points that the user supplies. The image
    defines the dimensionality of the data so if the input image is 3D then
    the input points should be 2D or 3D.
    ANTsR function: `makePointsImage`
    Arguments
    ---------
    pts : numpy.ndarray
        input powers points
    mask : ANTsImage
        mask defining target space
    radius : integer
        radius for the points
    Returns
    -------
    ANTsImage
    Example
    -------
    >>> import ants
    >>> import pandas as pd
    >>> mni = ants.image_read(ants.get_data('mni')).get_mask()
    >>> powers_pts = pd.read_csv(ants.get_data('powers_mni_itk'))
    >>> powers_labels = ants.make_points_image(powers_pts.iloc[:,:3].values, mni, radius=3)
    """
    # Blank label image in the same physical space as `mask`.
    powers_lblimg = mask * 0
    npts = len(pts)
    dim = mask.dimension
    if pts.shape[1] != dim:
        raise ValueError('points dimensionality should match that of images')
    for r in range(npts):
        pt = pts[r,:]
        # Map the physical-space point to an integer voxel index.
        idx = tio.transform_physical_point_to_index(mask, pt.tolist() ).astype(int)
        # Bounds check. NOTE(review): `idx <= mask.shape` admits idx == shape,
        # which looks one past the end for 0-based indexing — confirm against ANTsR.
        in_image = (np.prod(idx <= mask.shape)==1) and (len(np.where(idx<0)[0])==0)
        if ( in_image == True ):
            # Labels are 1-based point indices (0 stays background).
            if (dim == 3):
                powers_lblimg[idx[0],idx[1],idx[2]] = r + 1
            elif (dim == 2):
                powers_lblimg[idx[0],idx[1]] = r + 1
    # Grow each labelled voxel into a sphere of the requested radius.
    return utils.morphology( powers_lblimg, 'dilate', radius, 'grayscale' )
def map_replacements():
    """ create a map of what resources are replaced by others. This is a tree. """
    # isreplacedby[x] lists the resource ids that replace x;
    # replaces[x] lists the resource ids that x replaces.
    isreplacedby = {}
    replaces = {}
    prefix = 'http://www.hydroshare.org/resource/'
    for resource in BaseResource.objects.all():
        if not (resource.metadata and resource.metadata.relations):
            continue
        for relation in resource.metadata.relations.filter(type='isReplacedBy'):
            uri = relation.value
            if not uri.startswith(prefix):
                continue
            # Resource ids are the trailing 32 characters of the URI.
            rid = uri[-32:]
            # resource.short_id "is replaced by" rid, i.e. rid "replaces" it.
            isreplacedby.setdefault(resource.short_id, []).append(rid)
            replaces.setdefault(rid, []).append(resource.short_id)
    return isreplacedby, replaces
def order_basemaps(key, out):
    """Check the API key validity, then fetch and group the Planet basemaps
    to update the select list.

    Args:
        key: the Planet API key to validate.
        out: output/alert widget used to report progress.

    Returns:
        list of select-widget items grouped by mosaic type.
    """
    # checking the key validity
    validate_key(key, out)
    out.add_msg(cm.planet.mosaic.load)
    # authenticate to planet
    planet.client = api.ClientV1(api_key=planet.key)
    # get the basemap names
    # to use when Planet decides to update its API, until then I manually retrieve the mosaics
    # mosaics = planet.client.get_mosaics().get()['mosaics']
    url = planet.client._url("basemaps/v1/mosaics")
    mosaics = (
        planet.client._get(url, api.models.Mosaics, params={"_page_size": 1000})
        .get_body()
        .get()["mosaics"]
    )
    # filter the mosaics in 3 groups
    bianual, monthly, other, res = [], [], [], []
    for m in mosaics:
        name = m["name"]
        type_, short = mosaic_name(name)
        if type_ == "ANALYTIC_MONTHLY":
            monthly.append({"text": short, "value": name})
        elif type_ == "ANALYTIC_BIANUAL":
            bianual.append({"text": short, "value": name})
        elif type_ == "OTHER":
            # BUGFIX: these items previously went to `monthly`, so the "other"
            # group below could never be populated.
            other.append({"text": short, "value": name})
    # fill the results with the found mosaics
    if len(bianual):
        res += [{"header": "NICFI bianual"}] + bianual
    if len(monthly):
        res += [{"header": "NICFI monthly"}] + monthly
    if len(other):
        res += [{"header": "other"}] + other
    out.add_msg(cm.planet.mosaic.complete, "success")
    # (removed leftover debug print of the raw mosaic payload)
    return res
from typing import Any
def explanare_optionem(thing: Any) -> str:
    """Output debug information from data structures used on HDP containers
    Args:
        thing (Any): Anything that can be converted to str
    Returns:
        str: String
    """
    rendered = str(thing)
    return rendered
def compute_iou(box1, box2, yxyx=False):
  """Calculates the intersection of union between box1 and box2.
  Args:
    box1: a `Tensor` whose shape is [..., 4] and represents the coordinates of
      boxes in x_center, y_center, width, height.
    box2: a `Tensor` whose shape is [..., 4] and represents the coordinates of
      boxes in x_center, y_center, width, height.
    yxyx: `bool`, whether or not box1, and box2 are in yxyx format.
  Returns:
    iou: a `Tensor` whose shape is [...] and value represents the intersection
      over union.
  Raises:
    ValueError: If the last dimension of either box1 or box2 is not 4.
  """
  # Get box corners
  with tf.name_scope('iou'):
    if not yxyx:
      # Convert center/size boxes to (ymin, xmin, ymax, xmax) corner form.
      box1 = xcycwh_to_yxyx(box1)
      box2 = xcycwh_to_yxyx(box2)
    # Split each box into its min corner (y_min, x_min) and max corner
    # (y_max, x_max) halves along the last axis.
    b1mi, b1ma = tf.split(box1, 2, axis=-1)
    b2mi, b2ma = tf.split(box2, 2, axis=-1)
    # Overlap rectangle: elementwise max of mins and min of maxes; clamp the
    # extent at zero so disjoint boxes contribute no intersection area.
    intersect_mins = tf.math.maximum(b1mi, b2mi)
    intersect_maxes = tf.math.minimum(b1ma, b2ma)
    intersect_wh = tf.math.maximum(intersect_maxes - intersect_mins,
                                   tf.zeros_like(intersect_mins))
    intersection = tf.reduce_prod(
        intersect_wh, axis=-1)  # intersect_wh[..., 0] * intersect_wh[..., 1]
    box1_area = tf.math.abs(tf.reduce_prod(b1ma - b1mi, axis=-1))
    box2_area = tf.math.abs(tf.reduce_prod(b2ma - b2mi, axis=-1))
    union = box1_area + box2_area - intersection
    # Small epsilon guards against division by zero for degenerate boxes.
    iou = intersection / (union + 1e-7)
    iou = tf.clip_by_value(iou, clip_value_min=0.0, clip_value_max=1.0)
    return iou
from typing import Union
import warnings
def ensure_pyspark_df(spark_session: SparkSession, df: Union[pandasDF, sparkDF]):
    """Method for checking dataframe type for each onData() call from a RunBuilder."""
    # Already a Spark dataframe: nothing to do.
    if isinstance(df, sparkDF):
        return df
    # Pandas input: convert via the experimental utility after warning the caller.
    warnings.warn(
        "WARNING: You passed in a Pandas DF, so we will be using our experimental utility to "
        "convert it to a PySpark DF."
    )
    return PandasConverter.pandasDF_to_pysparkDF(spark_session, df)
from datetime import datetime, timedelta
def roundTime(dt=None, roundTo=60):
    """
    Round a datetime object to any time lapse in seconds.

    :param dt: datetime.datetime object to round, default now.
    :param roundTo: Closest number of seconds to round to, default 1 minute.
    :return: the rounded datetime.datetime object.

    Author: Thierry Husson 2012 - Use it as you want but don't blame me.
    Example:
        roundTo=30*60 - 30 minutes
    """
    # BUGFIX: the module imports `datetime` (the class), so the previous
    # `datetime.datetime.now()` / `datetime.timedelta(...)` calls raised
    # AttributeError at runtime.
    if dt is None:
        dt = datetime.now()
    # Seconds elapsed since midnight; tzinfo is dropped so subtracting the
    # naive `datetime.min` does not raise for aware inputs.
    seconds = (dt.replace(tzinfo=None) - dt.min).seconds
    # Round half-up to the nearest multiple of roundTo.
    rounding = (seconds + roundTo / 2) // roundTo * roundTo
    return dt + timedelta(0, rounding - seconds, -dt.microsecond)
def sample_recipe(user, **params):
    """Create and return a sample recipe"""
    # Defaults may be overridden by any keyword arguments supplied by the caller.
    defaults = {
        'title': 'simple ricepi shot',
        'time_minutes': 10,
        'price': 5.0,
    }
    defaults.update(params)
    return Recipe.objects.create(user=user, **defaults)
def read_nitf_offsets(filename):
    """Read NITF fields relevant to parsing SICD
    SICD (versions 0.3 and above) is stored in a NITF container. NITF is a
    complicated format that involves lots of fields and configurations
    possibilities. Fortunately, SICD only really uses a small, specific
    portion of the NITF format. This function extracts only the few parts of
    the NITF metadata necessary for reading a SICD NITF file.

    :param filename: path to the SICD NITF file.
    :return: dict with byte offsets/sizes of image segments and data
        extension segments ('img_segment_offsets', 'img_segment_rows',
        'img_segment_columns', 'des_lengths', 'des_offsets').
    :raises IOError: if the file is not NITF 2.1 or contains segment types
        SICD forbids.
    """
    # We have to open as binary, since there is some binary data in the file.
    # Python doesn't seem to let us read just part of the file as utf-8.
    with open(filename, mode='rb') as fid:
        # Read NITF file header
        if fid.read(9).decode('ascii') != "NITF02.10":  # Check format
            raise(IOError('SICD files must be NITF version 2.1'))
        # NITF header fields are fixed-width ASCII decimal numbers, so they
        # can be parsed by reading an exact number of bytes.
        fid.seek(354)  # Offset to first field of interest
        hl = np.uint32(fid.read(6))  # File header length
        numi = np.uint32(fid.read(3))  # Number of image segments
        img_segment_subhdr_lengths = np.zeros(numi, 'uint64')
        img_segment_data_lengths = np.zeros(numi, 'uint64')
        nitf = {}
        # Offset to image segment data from beginning of file (in bytes)
        nitf['img_segment_offsets'] = np.zeros(numi, 'uint64')
        # Number of rows in each image segment (in case data is spread across
        # multiple image segments)
        nitf['img_segment_rows'] = np.zeros(numi, 'uint32')
        # Number of columns in each image segment (in case data is spread
        # across multiple image segments)
        nitf['img_segment_columns'] = np.zeros(numi, 'uint32')
        # Each image segment's data starts after the header plus all preceding
        # subheaders and data blocks.
        for i in range(numi):
            img_segment_subhdr_lengths[i] = np.uint64(fid.read(6))
            nitf['img_segment_offsets'][i] = (
                hl +
                np.sum(img_segment_subhdr_lengths) +
                np.sum(img_segment_data_lengths))
            img_segment_data_lengths[i] = np.uint64(fid.read(10))
        # SICD forbids graphics and reserved extension segments; their count
        # fields must therefore read zero.
        segment_length = np.uint64(fid.read(3))
        if segment_length > 0:
            raise(IOError('SICD does not allow for graphics segments.'))
        segment_length = np.uint64(fid.read(3))
        if segment_length > 0:
            raise(IOError('SICD does not allow for reserved extension segments.'))
        numt = np.uint64(fid.read(3))
        text_segment_subhdr_lengths = np.zeros(numt, 'uint64')
        text_segment_data_lengths = np.zeros(numt, 'uint64')
        for i in range(numt):
            text_segment_subhdr_lengths[i] = np.uint64(fid.read(4))
            text_segment_data_lengths[i] = np.uint64(fid.read(5))
        numdes = np.uint32(fid.read(3))  # Number of data extension segments
        des_subhdr_lengths = np.zeros(numdes, 'uint64')
        des_data_lengths = np.zeros(numdes, 'uint64')
        for i in range(numdes):
            # Length of data extension segment subheader
            des_subhdr_lengths[i] = np.uint32(fid.read(4))
            # Length of data extension segment data
            des_data_lengths[i] = np.uint32(fid.read(9))
        nitf['des_lengths'] = des_data_lengths
        # DES data offsets: everything before the DES region, plus the running
        # totals of DES subheaders and preceding DES data blocks.
        nitf['des_offsets'] = (
            hl + np.sum(img_segment_subhdr_lengths) +
            np.sum(img_segment_data_lengths) +
            np.sum(text_segment_subhdr_lengths) +
            np.sum(text_segment_data_lengths) +
            np.cumsum(des_subhdr_lengths) +
            # Doesn't work on older version of NumPy due to an unsafe cast
            # np.cumsum(np.insert(des_data_lengths[:-1], 0, 0))
            # This should work in all versions of numpy:
            np.cumsum(np.hstack((np.uint64(0), des_data_lengths[:-1]))))
        # Get number of rows for each image segment from image segment headers
        next_img_subhdr_offset = hl
        for i in range(numi):
            fid.seek(next_img_subhdr_offset)  # Jump to ith image segment
            fid.seek(333, 1)  # Jump to number of rows field
            nitf['img_segment_rows'][i] = np.uint32(fid.read(8))
            nitf['img_segment_columns'][i] = np.uint32(fid.read(8))
            next_img_subhdr_offset = (
                next_img_subhdr_offset +
                img_segment_subhdr_lengths[i] + img_segment_data_lengths[i])
    return nitf
from datetime import datetime
def dttime(ts):
    """
    Convert a unix timestamp to a datetime object (local time).

    :param ts: unix timestamp
    :type ts: float
    :returns: datetime.datetime object
    :rtype: datetime.datetime
    """
    # BUGFIX: with `from datetime import datetime` in scope, the previous
    # `datetime.datetime.fromtimestamp(ts)` raised AttributeError.
    return datetime.fromtimestamp(ts)
import copy
def split_import(sc, node, alias_to_remove):
    """Split an import node by moving the given imported alias into a new import.
    Arguments:
      sc: (scope.Scope) Scope computed on whole tree of the code being modified.
      node: (ast.Import|ast.ImportFrom) An import node to split.
      alias_to_remove: (ast.alias) The import alias node to remove. This must be a
        child of the given `node` argument.
    Raises:
      errors.InvalidAstError: if `node` is not appropriately contained in the tree
        represented by the scope `sc`.
    """
    parent = sc.parent(node)
    # Locate which statement list of the parent actually holds `node`.
    containing_list = None
    for attr in ('body', 'orelse', 'finalbody'):
        if hasattr(parent, attr) and node in getattr(parent, attr):
            containing_list = getattr(parent, attr)
            break
    if containing_list is None:
        raise errors.InvalidAstError('Unable to find list containing import %r on '
                                     'parent node %r' % (node, parent))
    position = containing_list.index(node)
    # Clone the import, give the clone only the alias being moved, and strip
    # that alias from the original.
    new_import = copy.deepcopy(node)
    new_import.names = [alias_to_remove]
    node.names.remove(alias_to_remove)
    # Insert the new single-alias import immediately after the original.
    containing_list.insert(position + 1, new_import)
    return new_import
def load_trace(path):
    """Load the trace located in path.
    Args:
        path (string): Path to the LTTng trace folder.
    Returns:
        babeltrace.TraceCollection: a collection of one trace.
    """
    # Build a collection holding the single CTF-format trace at `path`.
    collection = bt.TraceCollection()
    collection.add_trace(path, 'ctf')
    return collection
def get_repo_paths(config_file_path):
    """
    Get a list of repository paths.
    Arguments:
        config_file_path (str): Path the to config file.
    Raises:
        (ConfigFileError): Raised if there was an error opening, reading or parsing through the config file.
    Returns:
        (list<str>): A list of repository paths.
    """
    config = read_yml(config_file_path)
    if config is None:
        raise ConfigFileError()
    # The repo paths are the keys of the 'repos' mapping.
    return [repo_path for repo_path in config['repos']]
async def get_bosswins_rank(conn : asyncpg.Connection, user_id : int) -> int:
    """Returns the rank in bosswins for the player given"""
    # Rank every player by bosswins, then pick out this player's row.
    query = """
            WITH ranks AS (
                SELECT ROW_NUMBER() OVER (ORDER BY bosswins DESC) AS rank, 
                    user_id, user_name, bosswins
                FROM players
            )
            SELECT rank, user_id, user_name, bosswins
            FROM ranks
            WHERE user_id = $1
            LIMIT 1;
            """
    rank = await conn.fetchval(query, user_id, timeout=0.2)
    return rank
def detect_objects_yolo(imgs, tensors):
    """This function makes use of multiprocessing to make predictions on batch.
    Parameters
    ----------
    imgs : list-like of images
    tensors : dict
        Contains tensors needed for making predictions.
    Returns
    -------
    boxes: tuple
        Tuple of length `n_images` containing list of boxes for each image.
    scores: tuple
    classes: tuple
        Note that this object already converts label index to label (e.g from 1
        to "phone").
    """
    net = tensors["yolo_net"]
    # Run one prediction per image across the worker pool.
    predictions = pool.map(lambda image: return_predict(net, image), imgs)
    # Transpose [(boxes, scores, classes), ...] into three parallel sequences.
    boxes, scores, classes = zip(*predictions)
    return np.array(boxes), np.array(scores), np.array(classes)
def append_id(endpoint, _id):
    """
    append '_id' to endpoint if provided
    """
    # No id: return the endpoint untouched (trailing slash preserved).
    if _id is None:
        return endpoint
    return endpoint.rstrip('/') + '/' + _id
import time
import statistics
def records() -> dict:
    """ Displays TJ's all time bests.

    Loads the cached record data, registers the logged-in user if they are
    new, refreshes the cached WCA ranks when stale, and returns the template
    context for the records page.
    """
    # NOTE(review): `statistics` must be a project-local module here — the
    # stdlib `statistics` has no `DNF` attribute. Same for the `cube`/`flask`
    # globals, which are defined elsewhere in the file.
    records = cube.load_file("records")
    times, people = records["records"], records["people"]
    refresh = False
    # Only a user authenticated with both WCA and Ion can be registered.
    if "wca_token" in flask.session and "ion_token" in flask.session:
        me = cube.api_call("wca", "me")["me"]
        year = cube.api_call("ion", "profile")["graduation_year"]
        if [me["url"], me["name"], year] not in people:
            records["people"].append([me["url"], me["name"], year])
            # New person added
            refresh = True
            cube.dump_file(records, "records")
    # Recompute ranks when a new person was added or the cache expired.
    if refresh or time.time() - records["time"] > cube.CONFIG["time"]:
        cube.update_records()
        (sing_rank, avg_rank), kinch_rank = cube.get_ranks()
        cube.dump_file({"sor_single": sing_rank,
                        "sor_average": avg_rank,
                        "kinch": kinch_rank}, "wca/ranks")
    return {"times": times, "events": cube.EVENTS, "icons": cube.ICONS, "DNF": statistics.DNF, "ranks": cube.RANKS}
import re
def get_info_media(title: str, ydl_opts=None, search_engine=None, result_count=1):
    """
    Resolve media information for a free-text query or a direct URL.

    :param title: search text, or a direct media URL (matched against `regex_link`)
    :param ydl_opts: options dict for youtube_dl.YoutubeDL; defaults applied when None
    :param search_engine: optional youtube-dl search prefix (e.g. 'ytsearch') to force
    :param result_count: number of search results to request
    :return: list of entry dicts for searches, a single info dict for direct URLs,
             False on download errors, or None when nothing was found
    """
    if ydl_opts is None:
        ydl_opts = {
            'quiet': False,
            'ignoreerrors': True,
            'noplaylist': True,
        }
    # Treat the title as a URL only when it matches the link pattern.
    url = title if re.match(regex_link, title) else None
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.cache.remove()
        if url:
            try:
                info = ydl.extract_info(url, download=False)
            except DownloadError:
                print('DownloadError')
                return False
        elif search_engine:
            info = ydl.extract_info(f"{search_engine}{result_count}:{title}", download=False)
        else:
            # Try YouTube search first, fall back to SoundCloud on failure.
            try:
                info = ydl.extract_info(f"ytsearch{result_count}:{title}", download=False)
            except DownloadError:
                try:
                    info = ydl.extract_info(f"scsearch{result_count}:{title}", download=False)
                except DownloadError:
                    print('DownloadError')
                    return False
    if not info:
        return
    # BUGFIX: direct-URL extraction of a single video returns an info dict
    # without an 'entries' key, so info['entries'] raised KeyError. Use .get().
    entries = info.get('entries')
    if entries:
        return entries
    return info
def export_post(request, style, format=-1):
    """
    Export records for the bibcodes POSTed in the request body.

    :param request: flask request carrying a JSON or form payload with
        `bibcode` (list of bibcodes) and optionally `sort`.
    :param style: export style name applied downstream.
    :param format: output format id; -1 means "no explicit format requested".
    :return: tuple of (response payload, HTTP status code)
    """
    # Accept either JSON or form-encoded payloads.
    # NOTE(review): bare except deliberately falls back to form data; any
    # parsing failure ends up in the empty-payload check below.
    try:
        payload = request.get_json(force=True) # post data in json
    except:
        payload = dict(request.form) # post data in form encoding
    if not payload:
        return {'error': 'no information received'}, 400
    if 'bibcode' not in payload:
        return {'error': 'no bibcode found in payload (parameter name is `bibcode`)'}, 400
    # Default sort order when the caller does not provide one.
    if 'sort' in payload:
        sort = read_value_list_or_not(payload, 'sort')
    else:
        sort = 'date desc, bibcode desc'
    bibcodes = payload['bibcode']
    if format == -1:
        current_app.logger.info('received request with bibcodes={bibcodes} to export in {style} style using sort order={sort}'.
                                format(bibcodes=','.join(bibcodes), style=style, sort=sort))
    else:
        current_app.logger.info('received request with bibcodes={bibcodes} to export in {style} style with output format {format} using sort order={sort}'.
                                format(bibcodes=','.join(bibcodes), style=style, format=format, sort=sort))
    # if in the test mode, return test solr data
    if current_app.config['EXPORT_SERVICE_TEST_BIBCODE_GET'] == bibcodes:
        return solrdata.data, 200
    return get_solr_data(bibcodes=bibcodes, fields=default_solr_fields(), sort=sort, encode_style=adsFormatter().native_encoding(format)), 200
def unpack_literal_map_to_sdk_object(literal_map, type_map=None):
    """
    :param flytekit.models.literals.LiteralMap literal_map:
    :param dict[Text, flytekit.common.types.base_sdk_types.FlyteSdkType] type_map: Type map directing unpacking.
    :rtype: dict[Text, T]
    """
    type_map = type_map or {}
    # Convert each literal, letting the type map (when present) steer decoding.
    unpacked = {}
    for name, literal in literal_map.literals.items():
        sdk_type = type_map.get(name, None)
        unpacked[name] = get_sdk_value_from_literal(literal, sdk_type=sdk_type)
    return unpacked
import requests
def get_data(user:str,num_last:int)->int:
    """Fetch the follower count for `user`, print the change versus
    `num_last`, and return the new count. Retries up to 3 times; returns
    False when every attempt fails (despite the declared int return type).
    """
    # error=None
    global proxies
    for i in range(3):
        try:
            # Twitter widget endpoint returning [{"followers_count": ...}].
            num_this=requests.get('https://cdn.syndication.twimg.com/widgets/followbutton/info.json?screen_names='+user,proxies=proxies,timeout=(10,30)).json()[0]['followers_count']
        # except requests.exceptions.ConnectTimeout:
        #     error=requests.exceptions.ConnectTimeout
        # except requests.exceptions.ProxyError:
        #     error=requests.exceptions.ProxyError
        # except requests.exceptions.ConnectionError:
        #     error=requests.exceptions.ConnectionError
        except:
            # error=sys.exc_info()[0]
            continue
        else:
            # Print a table cell: the absolute count on the first sample
            # (num_last == 0), blank when unchanged, otherwise the signed,
            # padded delta. PLUS/MINU/LENGTH_OF_SIGN are module-level display
            # constants defined elsewhere in the file.
            print(num_this if num_last==0 else "      " if num_this==num_last else ((PLUS if num_this>num_last else MINU)+str(abs(num_this-num_last)).rjust(6-LENGTH_OF_SIGN)),end=" | ")
            return num_this
    # All retries failed; "错误" means "error" in the rendered table.
    print("  错误",end=" | ")
    return False
import argparse
def parse_cli(argv=None) -> dict:
    """
    Parse CLI arguments.

    :param argv: Optional list of argument strings; ``None`` (the default)
        makes argparse read ``sys.argv[1:]``, preserving the old behavior.
    :return: CLI Arguments dict.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--settings_path",
        type=str,
        help="Path to settings YAML-file for RailLabel.",
        default="settings.yml",
    )
    parser.add_argument(
        "-d",
        "--dataset_path",
        type=str,
        help="Path to the directory containing a dataset.",
        required=True,
    )
    # Passing argv explicitly makes the function testable without sys.argv.
    return vars(parser.parse_args(argv))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.