content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def print_insn_mnem(ea):
    """Return the mnemonic of the instruction at *ea*.

    @param ea: linear address of the instruction
    @return: the mnemonic string, or "" when no instruction is decoded at
        the specified location
    @note: the returned mnemonic may not be exactly the same as the one
        rendered on screen.
    """
    mnemonic = ida_ua.ua_mnem(ea)
    return mnemonic if mnemonic else ""
def setdim(P, dim=None):
    """
    Adjust the dimensions of a polynomial.

    Output the results into Poly object

    Args:
        P (Poly) : Input polynomial
        dim (int) : The dimensions of the output polynomial. If omitted,
            increase polynomial with one dimension. If the new dim is
            smaller then P's dimensions, variables with cut components are
            all cut.

    Examples:
        >>> x,y = chaospy.variable(2)
        >>> P = x*x-x*y
        >>> print(chaospy.setdim(P, 1))
        q0^2
    """
    P = P.copy()
    ldim = P.dim
    # Default: grow by one dimension. NOTE: `not dim` also treats dim=0 as
    # "omitted" and grows instead of shrinking to zero.
    if not dim:
        dim = ldim+1
    if dim==ldim:
        return P
    P.dim = dim
    if dim>ldim:
        # Growing: pad each exponent key with trailing zeros up to `dim`.
        key = np.zeros(dim, dtype=int)
        for lkey in P.keys:
            key[:ldim] = lkey
            P.A[tuple(key)] = P.A.pop(lkey)
    else:
        # Shrinking: keep a term only when its trailing exponents (checked
        # from index ldim-1) are all zero, or it is the constant term; drop
        # every other term.
        # NOTE(review): `key` is never used in this branch, and the slice
        # start ldim-1 looks off by one relative to `dim` -- confirm upstream.
        key = np.zeros(dim, dtype=int)
        for lkey in P.keys:
            if not sum(lkey[ldim-1:]) or not sum(lkey):
                P.A[lkey[:dim]] = P.A.pop(lkey)
            else:
                del P.A[lkey]
    # Rebuild the sorted key list to match the mutated coefficient dict.
    P.keys = sorted(P.A.keys(), key=sort_key)
    return P
def extract_file_from_zip(zipfile, filename):
    """
    Return the compressed file `filename` from `zipfile`.

    :param zipfile: archive to read from
    :param filename: name of the member file to extract
    :raises NotImplementedError: always; this is a stub awaiting
        implementation. (The former trailing ``return None`` was
        unreachable and has been removed.)
    """
    raise NotImplementedError()
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
                                     image_width):
    """Transforms the box masks back to full image masks.

    Embeds masks in bounding boxes of larger masks whose shapes correspond to
    image shape.

    Args:
      box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].
      boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
        corners. Row i contains [ymin, xmin, ymax, xmax] of the box
        corresponding to mask i. Note that the box corners are in
        normalized coordinates.
      image_height: Image height. The output mask will have the same height as
        the image height.
      image_width: Image width. The output mask will have the same width as the
        image width.

    Returns:
      A tf.float32 tensor of size [num_masks, image_height, image_width].
    """
    # TODO(rathodv): Make this a public function.
    def reframe_box_masks_to_image_masks_default():
        """The default function when there are more than 0 box masks."""
        def transform_boxes_relative_to_boxes(boxes, reference_boxes):
            # Express each box's corners in the coordinate frame of its
            # reference box: shift by the min corner, scale by the box size.
            boxes = tf.reshape(boxes, [-1, 2, 2])
            min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
            max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)
            transformed_boxes = (boxes - min_corner) / (max_corner - min_corner)
            return tf.reshape(transformed_boxes, [-1, 4])
        box_masks_expanded = tf.expand_dims(box_masks, axis=3)
        num_boxes = tf.shape(box_masks_expanded)[0]
        # The unit box [0, 0, 1, 1] expressed relative to each mask's own box
        # gives the crop window that "un-does" the original box crop.
        unit_boxes = tf.concat(
            [tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
        reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)
        return tf.image.crop_and_resize(
            image=box_masks_expanded,
            boxes=reverse_boxes,
            box_ind=tf.range(num_boxes),
            crop_size=[image_height, image_width],
            extrapolation_value=0.0)
    # Guard against zero masks: crop_and_resize only runs when there is at
    # least one mask; otherwise an empty tensor of the right shape is built.
    image_masks = tf.cond(
        tf.shape(box_masks)[0] > 0,
        reframe_box_masks_to_image_masks_default,
        lambda: tf.zeros([0, image_height, image_width, 1], dtype=tf.float32))
    return tf.squeeze(image_masks, axis=3)
def set_volume(entities: DottedDict, interface: InterfaceIO) -> None:
    """Set volume to a certain level.

    Extracts the first run of digits from ``entities['level']``; silently
    does nothing when no digits are present. The applied level is remembered
    in the module-level ``_original_volume``.
    """
    global _original_volume
    digits = re.findall(r"\d+", entities.get("level", ""))
    if not digits:
        return
    level = digits[0]
    _spotify.set_volume(level)
    _original_volume = level
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
    """Per-class non-maximum suppression over predicted boxes.

    Args:
        cls_scores: (N, num_class) per-box classification scores
        box_preds: (N, 7 + C) predicted boxes; the first 7 values are the
            box parameters fed to the NMS kernel
        nms_config: config exposing NMS_TYPE, NMS_THRESH, NMS_PRE_MAXSIZE
            and NMS_POST_MAXSIZE
        score_thresh: optional scalar; boxes scoring below it are dropped
            before NMS for each class
    Returns:
        (pred_scores, pred_labels, pred_boxes) concatenated over classes;
        labels carry the class index ``k``.
    """
    pred_scores, pred_labels, pred_boxes = [], [], []
    for k in range(cls_scores.shape[1]):
        if score_thresh is not None:
            scores_mask = (cls_scores[:, k] >= score_thresh)
            box_scores = cls_scores[scores_mask, k]
            cur_box_preds = box_preds[scores_mask]
        else:
            box_scores = cls_scores[:, k]
            cur_box_preds = box_preds
        selected = []
        if box_scores.shape[0] > 0:
            # Keep at most NMS_PRE_MAXSIZE top-scoring boxes before NMS.
            box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
            boxes_for_nms = cur_box_preds[indices]
            # NMS_TYPE selects the kernel implementation by name.
            keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
                boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
            )
            # Map kept indices back into the (masked) per-class ordering and
            # cap the final count at NMS_POST_MAXSIZE.
            selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
        pred_scores.append(box_scores[selected])
        pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
        pred_boxes.append(cur_box_preds[selected])
    pred_scores = torch.cat(pred_scores, dim=0)
    pred_labels = torch.cat(pred_labels, dim=0)
    pred_boxes = torch.cat(pred_boxes, dim=0)
    return pred_scores, pred_labels, pred_boxes
def _register_resolver() -> None:
    """Registers the cirq module's public classes for JSON serialization."""
    # Function-local imports -- presumably to avoid a circular import
    # between the protocols package and the resolver cache at module load
    # time; confirm before hoisting them to the top of the file.
    from cirq.protocols.json_serialization import _internal_register_resolver
    from cirq.json_resolver_cache import _class_resolver_dictionary
    _internal_register_resolver(_class_resolver_dictionary)
def check_str_length(str_to_check, limit=MAX_LENGTH):
    """Truncate *str_to_check* when its UTF-8 encoding exceeds *limit* bytes.

    :type str_to_check: str
    :param str_to_check: String to check.
    :type limit: int
    :param limit: Upper bound on the encoded byte length.
    :rtype: tuple
    :returns: ``(string, truncated_byte_count)`` -- the string itself and 0
        when it fits; otherwise the truncated string and the number of
        bytes dropped.
    """
    encoded = str_to_check.encode(UTF8)
    overflow = len(encoded) - limit
    if overflow <= 0:
        return (str(encoded.decode(UTF8, errors='ignore')), 0)
    clipped = encoded[:limit]
    # errors='ignore' drops a multi-byte character cut in half by the clip.
    return (str(clipped.decode(UTF8, errors='ignore')), overflow)
def fit(causal_model: ProbabilisticCausalModel, data: pd.DataFrame):
    """Learns generative causal models of nodes in the causal graph from data.

    Iterates over every node in the graph and fits its causal mechanism via
    ``fit_causal_model_of_target``.

    :param causal_model: The causal model containing the mechanisms that will be fitted.
    :param data: Observations of nodes in the causal model.
    :raises RuntimeError: when a graph node has no corresponding column in ``data``.
    """
    progress_bar = tqdm(causal_model.graph.nodes, desc='Fitting causal models', position=0, leave=True,
                        disable=not config.show_progress_bars)
    for node in progress_bar:
        # ``node not in data`` checks for a DataFrame column named after the node.
        if node not in data:
            raise RuntimeError('Could not find data for node %s in the given training data! There should be a column '
                               'containing samples for node %s.' % (node, node))
        progress_bar.set_description('Fitting causal mechanism of node %s' % node)
        fit_causal_model_of_target(causal_model, node, data)
def test_create_and_delete_my_bucket(make_stubber, make_unique_name, region, keep):
    """Test that running the demo with various AWS Regions and arguments works as
    expected.

    Stubs the exact sequence of S3 calls the demo makes, runs it, and then
    verifies the bucket still exists (keep=True) or is gone (keep=False).
    """
    stubber = make_stubber(demo_bucket_basics, 'get_s3', region)
    s3 = demo_bucket_basics.get_s3(region)
    bucket_name = make_unique_name('bucket')
    # Stubs are consumed in call order by the demo and the assertions below.
    stubber.stub_list_buckets([])
    stubber.stub_create_bucket(bucket_name, region)
    stubber.stub_head_bucket(bucket_name)
    stubber.stub_list_buckets([s3.Bucket(bucket_name)])
    if keep:
        # Extra head + delete consumed by this test's own cleanup below.
        stubber.stub_head_bucket(bucket_name)
        stubber.stub_delete_bucket(bucket_name)
    else:
        stubber.stub_delete_bucket(bucket_name)
        stubber.stub_head_bucket(bucket_name, 404)
        stubber.stub_list_buckets([])
        stubber.stub_head_bucket_error(bucket_name, 404)
    demo_bucket_basics.create_and_delete_my_bucket(bucket_name, region, keep)
    if keep:
        # Bucket must still exist; remove it so the test leaves no residue.
        response = s3.meta.client.head_bucket(Bucket=bucket_name)
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
        s3.Bucket(bucket_name).delete()
    else:
        # Bucket must be gone: head_bucket raises a 404 ClientError.
        with pytest.raises(ClientError) as exc_info:
            s3.meta.client.head_bucket(Bucket=bucket_name)
        assert exc_info.value.response['Error']['Code'] == '404'
def run(problem, **kwargs):
    """
    A single run with a specific set of performance parameters.

    Pops the solver-independent options (time/space order, autotuning flag,
    optional explicit block shapes) off ``kwargs`` and forwards everything
    else to the problem's setup function.
    """
    setup = model_type[problem]['setup']
    options = {}
    time_order = kwargs.pop('time_order')[0]
    space_order = kwargs.pop('space_order')[0]
    autotune = kwargs.pop('autotune')
    block_shapes = as_tuple(kwargs.pop('block_shape'))
    # Should a specific block-shape be used? Useful if one wants to skip
    # the autotuning pass as a good block-shape is already known
    if block_shapes:
        # The following piece of code is horribly hacky, but it works for now
        for i, block_shape in enumerate(block_shapes):
            bs = [int(x) for x in block_shape.split()]
            # If hierarchical blocking is activated, say with N levels, here in
            # `bs` we expect to see 3*N entries
            levels = [bs[x:x+3] for x in range(0, len(bs), 3)]
            for n, level in enumerate(levels):
                if len(level) != 3:
                    raise ValueError("Expected 3 entries per block shape level, "
                                     "but got one level with only %s entries (`%s`)"
                                     % (len(level), level))
                # One option per spatial dimension, loop nest `i`, level `n`.
                for d, s in zip(['x', 'y', 'z'], level):
                    options['%s%d_blk%d_size' % (d, i, n)] = s
    solver = setup(space_order=space_order, time_order=time_order, **kwargs)
    solver.forward(autotune=autotune, **options)
async def test_snips_say(hass):
    """Test snips say with invalid config.

    Registers a mock ``snips.say`` service, fires one call and verifies it
    is received once with the expected domain, service and payload.
    """
    calls = async_mock_service(hass, "snips", "say", snips.SERVICE_SCHEMA_SAY)
    data = {"text": "Hello"}
    await hass.services.async_call("snips", "say", data)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].domain == "snips"
    assert calls[0].service == "say"
    assert calls[0].data["text"] == "Hello"
async def create_or_update(
    hub,
    ctx,
    name,
    resource_group,
    prefix_length,
    sku="standard",
    public_ip_address_version="IPv4",
    zones=None,
    **kwargs,
):
    """
    .. versionadded:: 4.0.0

    Creates or updates a static or dynamic public IP prefix.

    :param name: The name of the public IP prefix.

    :param resource_group: The resource group of the public IP prefix.

    :param prefix_length: An integer representing the length of the Public IP Prefix. This value is immutable
        once set. If the value of the ``public_ip_address_version`` parameter is "IPv4", then possible values include
        28, 29, 30, 31. If the value of the ``public_ip_address_version`` parameter is "IPv6", then possible values
        include 124, 125, 126, 127.

    :param sku: The name of a public IP prefix SKU. Possible values include: "standard". Defaults to "standard".

    :param public_ip_address_version: The public IP address version. Possible values include: "IPv4" and "IPv6".
        Defaults to "IPv4".

    :param zones: A list of availability zones that denotes where the IP allocated for the resource needs
        to come from.

    :return: a dict of the created/updated prefix, or ``{"error": ...}`` on failure.

    CLI Example:

    .. code-block:: bash

        azurerm.network.public_ip_prefix.create_or_update test_name test_group test_length
    """
    # Default the location to that of the resource group when not given.
    if "location" not in kwargs:
        rg_props = await hub.exec.azurerm.resource.group.get(
            ctx, resource_group, **kwargs
        )
        if "error" in rg_props:
            log.error("Unable to determine location from resource group specified.")
            return {
                "error": "Unable to determine location from resource group specified."
            }
        kwargs["location"] = rg_props["location"]
    # The SDK expects the SKU as a {"name": ...} object, lower-cased.
    if sku:
        sku = {"name": sku.lower()}
    result = {}
    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
    try:
        prefix_model = await hub.exec.azurerm.utils.create_object_model(
            "network",
            "PublicIPPrefix",
            prefix_length=prefix_length,
            sku=sku,
            public_ip_address_version=public_ip_address_version,
            zones=zones,
            **kwargs,
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result
    try:
        # create_or_update returns a poller; wait() blocks until completion.
        prefix = netconn.public_ip_prefixes.create_or_update(
            resource_group_name=resource_group,
            public_ip_prefix_name=name,
            parameters=prefix_model,
        )
        prefix.wait()
        result = prefix.result().as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
        result = {"error": str(exc)}
    except SerializationError as exc:
        result = {
            "error": "The object model could not be parsed. ({0})".format(str(exc))
        }
    return result
def _get_job_name(job_label: str = None) -> str:
    """Build a Beam runner job name.

    Args:
      job_label: Optional user-supplied label appended to the name;
        underscores are replaced with hyphens for runner compatibility.

    Returns:
      A job name compatible with apache beam runners; the timestamp suffix
      keeps it unique.
    """
    parts = ['tfrecorder-' + common.get_timestamp()]
    if job_label:
        parts.append(job_label.replace('_', '-'))
    return '-'.join(parts)
def process_provinces(data: Iterable[Mapping[str, Union[str, int]]]) -> Iterator[List]:
    """Yield ``[code, name]`` pairs for every province in *data*.

    Note: this is a generator, not a function returning a list (the
    original docstring and ``List`` annotation were misleading); callers
    must iterate it or wrap it in ``list``. The parameter annotation is
    also corrected: each *item* is a mapping with ``code`` and ``name``
    keys, so *data* is an iterable of such mappings.

    Args:
        data: An iterable of province mappings; a missing key yields None
            in its slot.

    Yields:
        Two-element lists ``[code, name]``.
    """
    for province in data:
        yield [province.get('code'), province.get('name')]
def init_json():
    """Initialize two JSON result files, each holding an empty object.

    Creates (or truncates) ``reachable.json`` and ``unreachable.json`` in
    the current working directory. Both paths are spelled consistently now;
    the original mixed "./reachable.json" with a bare "unreachable.json",
    which resolve identically but read as if they differed.
    """
    for result_path in ("reachable.json", "unreachable.json"):
        with open(result_path, "w") as f:
            json.dump({}, f)
def all_not_none(*args):
    """Return True when no argument is None.

    Shorthand for ``all(x is not None for x in args)``; an empty argument
    list yields True.
    """
    return not any(arg is None for arg in args)
def weighted_photon_spec(eng):
    """Weighted photon spectrum from positronium annihilation.

    Assumes a 3:1 ortho-to-para mix (3/4 ortho-, 1/4 para-positronium),
    normalized to a single annihilation.

    Parameters
    ----------
    eng : ndarray
        The energy abscissa.

    Returns
    -------
    Spectrum
        The resulting photon :class:`.Spectrum` object.
    """
    ortho_weight = 3 / 4
    para_weight = 1 / 4
    return ortho_weight * ortho_photon_spec(eng) + para_weight * para_photon_spec(eng)
def get_for_tag(app_name):
    """Return the customized ``{% for %}`` template tag used to list
    records in the ``list.html`` template.

    :param app_name: Name of the app being created
    :type app_name: str
    """
    return f"{{% for {app_name} in {app_name}s %}}"
def perform_log(tc, args=None):
    """Collect all system counters (CPU, memory, disk, network) and track them.

    :param tc: telemetry client; every non-empty counter dict is forwarded
        to ``track_dict_as_metric`` and the client is flushed at the end.
    :param args: optional options dict; a truthy ``args['print']`` also
        echoes properties and every collected counter dict to stdout.
    """
    # Avoid the original mutable-default-argument pattern (args={}).
    args = args or {}
    opt_print = args.get('print', False)
    properties = get_properties()
    # BUG FIX: the original tested ``if print:`` (the builtin function,
    # always truthy) instead of ``opt_print``, so properties were printed
    # unconditionally.
    if opt_print:
        print(properties)
    # Each collector returns a dict of counters; empty results are skipped.
    for collect in (get_cpu_counters, get_memory_counters,
                    get_disk_counters, get_network_counters):
        counters = collect(args)
        if len(counters) > 0:
            track_dict_as_metric(tc, counters, properties=properties)
            if opt_print:
                print(counters)
    tc.flush()
def print_names(pair):
    """Print pair names

    Args:
        pair: is a tuple containing the players that match user input
    Return: None

    Prints each pair match to screen in the order
    the small player on the left and the tall on the right
    (ordering is determined by the caller -- this function prints the pair
    as given, first name left-padded to 18 characters).
    """
    player1, player2 = pair
    # Look the two keys up in the module-level ``players`` mapping.
    p1 = players[player1].get('first_name') + ' ' +\
        players[player1].get('last_name')
    p2 = players[player2].get('first_name') + ' ' +\
        players[player2].get('last_name')
    print('- {0:18} \t{1}'.format(p1, p2))
def _reloadFn(*args):
    """Placeholder callback function for :func:`_handleSIGHUP`.

    Accepts and ignores any signal-handler arguments and always returns
    True.
    """
    return True
def interpolation_lagrange_matrix(old_grid, new_grid):
    """
    Evaluate lagrange matrix to interpolate state and control values from the solved grid onto the new grid.

    Parameters
    ----------
    old_grid : <GridData>
        GridData object representing the grid on which the problem has been solved.
    new_grid : <GridData>
        GridData object representing the new, higher-order grid.

    Returns
    -------
    ndarray
        L, the block-diagonal lagrange interpolation matrix.
    ndarray
        D, the corresponding block-diagonal matrix (second output of
        ``lagrange_matrices`` per segment -- presumably the differentiation
        matrix; confirm against that function's documentation).
    """
    L_blocks = []
    D_blocks = []
    # Build one interpolation block per segment, mapping the old segment's
    # nodes onto the new segment's nodes.
    for iseg in range(old_grid.num_segments):
        i1, i2 = old_grid.subset_segment_indices['all'][iseg, :]
        indices = old_grid.subset_node_indices['all'][i1:i2]
        nodes_given = old_grid.node_stau[indices]
        i1, i2 = new_grid.subset_segment_indices['all'][iseg, :]
        indices = new_grid.subset_node_indices['all'][i1:i2]
        nodes_eval = new_grid.node_stau[indices]
        L_block, D_block = lagrange_matrices(nodes_given, nodes_eval)
        L_blocks.append(L_block)
        D_blocks.append(D_block)
    # Segments are independent, so the full matrices are block diagonal.
    L = block_diag(*L_blocks)
    D = block_diag(*D_blocks)
    return L, D
def bfs_paths(graph, start, end):
    """Yield paths from *start* to *end* in *graph*, shortest first.

    Breadth-first search guarantees the first path yielded is a shortest
    one; subsequent paths are the same length or longer.

    :param graph: mapping of vertex -> set of adjacent vertices
    :param start: start vertex (must be a key of *graph*)
    :param end: end vertex (must be a key of *graph*)
    :return: generator of paths, each a list of vertices
    """
    from collections import deque

    assert start in graph
    assert end in graph
    # Queue of (vertex, path-so-far) tuples. A deque gives O(1) popleft,
    # where the original list.pop(0) was O(n) per dequeue.
    queue = deque([(start, [start])])
    while queue:
        vertex, path = queue.popleft()
        # Exclude vertices already on this path to avoid cycles.
        for nxt in graph[vertex] - set(path):
            if nxt == end:
                yield path + [nxt]
            else:
                queue.append((nxt, path + [nxt]))
def test_calculate_source_not_string(source):
    """
    GIVEN source
    WHEN calculate is called with the source
    THEN InvalidInputError is raised.
    """
    # ``source`` is presumably a parametrized fixture of invalid inputs.
    with pytest.raises(errors.InvalidInputError):
        calculate(source)
def optimize_shim(coils, unshimmed, mask, mask_origin=(0, 0, 0), bounds=None):
    """
    Optimize unshimmed volume by varying current to each channel

    Args:
        coils (numpy.ndarray): X, Y, Z, N coil map
        unshimmed (numpy.ndarray): 3D B0 map
        mask (numpy.ndarray): 3D integer mask used for the optimizer (only consider voxels with non-zero values).
        mask_origin (tuple): Mask origin if mask volume does not cover unshimmed volume
        bounds (list): List of ``(min, max)`` pairs for each coil channels. None
            is used to specify no bound.

    Returns:
        tuple: ``(currents, residuals)`` -- per-channel currents minimizing
        the objective function, and the residual vector from the solver.

    NOTE(review): ``bounds=None`` is forwarded unchanged to
    ``scipy.optimize.least_squares``, whose ``bounds`` parameter expects a
    2-tuple -- confirm that None is actually accepted here.
    """
    # cmap = plt.get_cmap('bone')
    # cmap.set_bad('black')
    # mag_fig, mag_ax = plt.subplots(1, 1)
    # plotter_mag = Slice_Plotter(mag_ax, np.transpose((unshimmed), axes=(1, 0, 2)), f'Unshimmed Full', cmap=cmap)
    # mag_fig.canvas.mpl_connect('scroll_event', plotter_mag.onscroll)
    # plt.show(block=True)
    # plt.close()
    # Slices selecting the sub-volume of `unshimmed` covered by the mask.
    mask_range = tuple([slice(mask_origin[i], mask_origin[i] + mask.shape[i]) for i in range(3)])
    mask_vec = mask.reshape((-1,))
    # Least squares solver
    N = coils.shape[3]
    # Reshape coil profile: X, Y, Z, N --> [mask.shape], N
    # --> N, [mask.shape] --> N, mask.size --> mask.size, N
    coil_mat = np.reshape(np.transpose(coils[mask_range], axes=(3, 0, 1, 2)),
                          (N, -1)).T
    coil_mat = coil_mat[mask_vec != 0, :]  # masked points x N
    unshimmed = unshimmed[mask_range]
    unshimmed_vec = np.reshape(unshimmed, (-1,))  # mV
    unshimmed_vec = unshimmed_vec[mask_vec != 0]  # mV'
    # Set up output currents and optimize
    if bounds is not None:
        bounds = np.asarray(bounds)
    currents_0 = np.zeros(N)
    currents_sp = opt.least_squares(shim_residuals, currents_0,
                                    args=(unshimmed_vec, coil_mat), bounds=bounds)
    currents = currents_sp.x
    residuals = np.asarray(currents_sp.fun)
    return (currents, residuals)
def _log(x1):
    """closure of log for zero arguments, sign-protected

    Values with ``|x1| <= 0.001`` are first replaced by 1, so the log of a
    (near-)zero argument evaluates to 0 instead of -inf/nan.

    NOTE(review): for ``x1 < -1`` the result is ``log(|x1|) * sign(x1)``
    (i.e. negated), while other values get plain ``log(|x1|)`` --
    presumably a protected-log convention; confirm against the intended
    specification before relying on the sign behavior.
    """
    with np.errstate(divide="ignore", invalid="ignore"):
        x1 = np.where(np.abs(x1) > 0.001, x1, 1)
        return np.where(x1 < -1, np.log(np.abs(x1)) * np.sign(x1), np.log(np.abs(x1)))
def whats_the_meaning_of_life(n_cores=23):
    """Answers the question about the meaning of life.

    You don't even have to ask the question, it will figure it out for you.
    Don't use more cores than available to mankind.

    Parameters
    ----------
    n_cores: int [default: 23]
        The number of CPU cores to use (accepted for API compatibility;
        the answer is core-count independent).

    Returns
    -------
    int
        The type of the expected answer is of course an integer.
    """
    the_answer = 6 * 7
    return the_answer
def get_good_start(system, numdistricts):
    """Produce a decent starting solution for simulated annealing.

    Draws 100 random solutions and keeps the best-valued one, so annealing
    does not have to begin from an arbitrarily bad initial state.
    """
    print('Acquiring a good initial solution')
    best = Solution(system, numdistricts)
    best.generate_random_solution()  # seed with one random solution
    for _ in tqdm(range(100)):
        candidate = Solution(system, numdistricts)
        candidate.generate_random_solution()
        if candidate.value > best.value:
            best = candidate
    print('Starting with Solution[{}]'.format(best.value))
    return best
def test_cav():
    """Test module cav.py by downloading cav.csv and checking that the
    extracted data has 138 rows and 2 columns.

    The temporary download directory is always removed (the original leaked
    it on success), and a failing assertion now propagates correctly: the
    original's bare ``raise()`` raised a TypeError -- a call on an empty
    tuple -- instead of re-raising the caught exception.
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = cav(test_path)
        assert x_train.shape == (138, 2)
    finally:
        shutil.rmtree(test_path)
def test_valid_version():
    """Check that the package defines a valid ``__version__``.

    The current version string must parse and compare at least equal to the
    initial release marker "0.1.0-dev".
    """
    v_curr = parse_version(pypkg_gh_releases_01.__version__)
    v_orig = parse_version("0.1.0-dev")
    assert v_curr >= v_orig
def search_up(word_list, matrix):
    """Search words from word_list in matrix, up direction

    Delegates to ``straight_search``; the two boolean flags (True, False)
    presumably select the vertical/reversed scan direction -- confirm
    against ``straight_search``'s signature.

    :param word_list - list of strings
    :param matrix - list of lists
    :return list of lists"""
    return straight_search(word_list, matrix, True, False)
def int_validator(inp, ifallowed):
    """
    Test whether only (non-negative) integers are being keyed into a widget.

    Call signature: %S %P  (Tk validatecommand percent substitutions:
    *inp* is the text being inserted, *ifallowed* the prospective widget
    content if the edit is allowed).

    The original had an unreachable ``return True`` after the try/except
    (both paths already return); it has been removed.

    :param inp: the string being inserted.
    :param ifallowed: the widget's would-be full content.
    :returns: True to accept the edit, False to reject it.
    """
    # Reject edits that would make the content longer than 10 characters.
    if len(ifallowed) > 10:
        return False
    try:
        return int(inp) >= 0
    except ValueError:
        return False
def _check_config_aurora(configuration):
    """
    Internal function to check configuration of an aurora

    Fills in defaults for missing keys, mutating *configuration* in place:
    a missing "serial port" becomes -1.

    NOTE(review): the second check tests for the key
    "number of ports to probe" but inserts the default under the different
    key "ports to probe" (value 20). One of the two spellings is almost
    certainly wrong -- confirm which key the consumer reads before fixing.
    """
    if "serial port" not in configuration:
        configuration.update({"serial port": -1})
    if "number of ports to probe" not in configuration:
        configuration.update({"ports to probe" : 20})
def clear():
    """
    Clears the console platform-specific.

    Runs ``cls`` on Windows and ``clear`` on Linux/macOS; does nothing on
    other platforms.
    """
    p = platform.system()
    if p == 'Windows':
        # BUG FIX: ``cls`` is a cmd.exe builtin, not an executable, so it
        # cannot be spawned with shell=False; it requires the shell.
        subprocess.call('cls', shell=True)
    elif p in ['Linux', 'Darwin']:
        subprocess.call('clear', shell=False)
def delete_site_files(site):
    """Deletes all site content and configuration files.

    Removes the site's nginx, php-fpm and supervisor config files (when
    present) and then the site directory itself. Filesystem errors are
    reported to the ``client`` error tracker instead of propagating, so a
    partial failure still attempts the remaining deletions.
    """
    files = ["/etc/nginx/director.d/{}.conf", "/etc/php/7.0/fpm/pool.d/{}.conf", "/etc/supervisor/director.d/{}.conf"]
    files = [x.format(site.name) for x in files]
    for f in files:
        if os.path.isfile(f):
            try:
                os.remove(f)
            except OSError:
                client.captureException()
    try:
        shutil.rmtree(site.path)
    except Exception:
        client.captureException()
def scrape_page(url):
    """
    scrape page by url and return its html
    :param url: page url
    :return: html of page, or None when the request fails or the status
        code is not 200
    """
    logging.info('scraping %s...', url)
    try:
        response = requests.get(url)
    except requests.RequestException:
        logging.error('error occurred while scraping %s', url, exc_info=True)
        return None
    if response.status_code == 200:
        return response.text
    logging.error('get invalid status code %s while scraping %s', response.status_code, url)
    return None
def getProductionUrl(code,d0):
    """Get the url for production data for region *code* at date *d0*.

    Builds the base png url via ``getUrl`` and substitutes the
    ``__datehere__`` placeholder with the formatted date. (The original
    docstring mentioned a nonexistent ``d1`` parameter.)
    """
    url = getUrl('png',code,2018,opts=[[None]])
    url = url.replace('__datehere__',eomf.m2s(d0),)
    return url
def has_gaps_in_region(read, region):
    """
    Return True if the given pysam read's alignment has a gap inside the
    given pybedtools.Interval, ``region``.
    """
    # If the given read has gaps in its alignment to the reference inside the
    # given interval (more than one block inside the SV event itself), there are
    # gaps inside the SV.
    # Index the read's aligned blocks, then count how many overlap the
    # region; more than one overlapping block implies a gap between them.
    tree = intervaltree.IntervalTree()
    for block in read.get_blocks():
        tree[block[0]:block[1]] = block
    return len(tree[region.start:region.end]) > 1
def zha_device_joined(opp, setup_zha):
    """Return a newly joined ZHA device.

    Factory fixture: the returned coroutine ``_zha_device(zigpy_dev)`` sets
    up ZHA, initializes the device on the gateway, waits for pending work,
    and returns the corresponding ZHA device object.
    """
    async def _zha_device(zigpy_dev):
        await setup_zha()
        zha_gateway = get_zha_gateway(opp)
        await zha_gateway.async_device_initialized(zigpy_dev)
        await opp.async_block_till_done()
        return zha_gateway.get_device(zigpy_dev.ieee)
    return _zha_device
async def unlock_perm(message: Message):
    """ unlock chat permissions from tg group

    ``message.input_str`` selects what to unlock: the literal "all" enables
    every permission; otherwise the type must be one of ``_types`` and the
    per-permission booleans are resolved via ``_get_chat_lock(..., False)``.
    """
    unlock_type = message.input_str
    chat_id = message.chat.id
    if not unlock_type:
        await message.err(r"I Can't Unlock Nothing! (-‸ლ)")
        return
    if unlock_type == "all":
        try:
            await message.client.set_chat_permissions(
                chat_id,
                ChatPermissions(can_send_messages=True,
                                can_send_media_messages=True,
                                can_send_stickers=True,
                                can_send_animations=True,
                                can_send_games=True,
                                can_use_inline_bots=True,
                                can_send_polls=True,
                                can_change_info=True,
                                can_invite_users=True,
                                can_pin_messages=True,
                                can_add_web_page_previews=True))
            await message.edit(
                "**🔓 Unlocked all permission from this Chat!**", del_in=5)
            await CHANNEL.log(
                f"#UNLOCK\n\nCHAT: `{message.chat.title}` (`{chat_id}`)\n"
                f"PERMISSIONS: `All Permissions`")
        except Exception as e_f:
            # NOTE(review): the first part is a raw string, so ``\n\n`` is
            # shown literally rather than as blank lines -- confirm intent.
            await message.edit(
                r"`i don't have permission to do that >︿<`\n\n"
                f"**ERROR:** `{e_f}`", del_in=5)
        return
    if unlock_type in _types:
        # _get_chat_lock(..., False) returns the per-permission booleans to
        # apply plus a human-readable permission name (uperm).
        (umsg, umedia, ustickers,
         uanimations, ugames, uinlinebots,
         uwebprev, upolls, uinfo, uinvite,
         upin, uperm) = _get_chat_lock(message, unlock_type, False)
    else:
        await message.err(r"Invalid Unlock Type! ¯\_(ツ)_/¯")
        return
    try:
        await message.client.set_chat_permissions(
            chat_id,
            ChatPermissions(can_send_messages=umsg,
                            can_send_media_messages=umedia,
                            can_send_stickers=ustickers,
                            can_send_animations=uanimations,
                            can_send_games=ugames,
                            can_use_inline_bots=uinlinebots,
                            can_add_web_page_previews=uwebprev,
                            can_send_polls=upolls,
                            can_change_info=uinfo,
                            can_invite_users=uinvite,
                            can_pin_messages=upin))
        await message.edit(f"**🔓 Unlocked {uperm} for this chat!**", del_in=5)
        await CHANNEL.log(
            f"#UNLOCK\n\nCHAT: `{message.chat.title}` (`{chat_id}`)\n"
            f"PERMISSIONS: `{uperm} Permission`")
    except Exception as e_f:
        await message.edit(
            r"`i don't have permission to do that >︿<`\n\n"
            f"**ERROR:** `{e_f}`", del_in=5)
def test_logical_operators():
    """Logical operators
    @see: https://www.w3schools.com/python/python_operators.asp
    Logical operators are used to combine boolean values.
    """
    # Two sample numbers used in all the boolean expressions below.
    first_number = 5
    second_number = 10
    # `and` is True only when both operands are True.
    assert first_number > 0 and second_number < 20
    # `or` is True when at least one operand is True (the left one is
    # False here, the right one True).
    assert first_number > 5 or second_number < 20
    # `not` inverts its operand: the numbers differ, so the equality is
    # False and its negation True.
    # pylint: disable=unneeded-not
    assert not first_number == second_number
    assert first_number != second_number
def get_all_list_data(request_context, function, *args, **kwargs):
    """
    Call *function* with the given args and follow every "next" response
    until exhausted, returning the accumulated json data.

    Paged responses (as produced by the ``get_next`` generator) are
    expected to carry their data as lists, which are concatenated onto the
    first response's json. When there are no paged results, the first
    response's json is returned unchanged. Any exception raised during the
    initial call or while paging propagates to the caller and any
    intermediary results are lost. Worst case complexity O(n).

    :param RequestContext request_context: The context required to make an API call
    :param function function: The API function to call
    :return: A list of all json data retrieved while iterating over response links, or the initial json
        function response if there are no paged results
    :rtype: list of json data or json
    """
    first_response = function(request_context, *args, **kwargs)
    collected = first_response.json()
    for page in get_next(request_context, first_response):
        collected.extend(page.json())
    return collected
def bdd_common_after_all(context_or_world):
    """Common after all method in behave or lettuce

    Closes all session-scoped drivers and pushes the overall test status to
    Jira.

    :param context_or_world: behave context or lettuce world
    """
    # Close drivers
    DriverWrappersPool.close_drivers(scope='session', test_name='multiple_tests',
                                     test_passed=context_or_world.global_status['test_passed'])
    # Update tests status in Jira
    change_all_jira_status()
def test_primitive_error(source, location):
    """
    GIVEN source and location
    WHEN primitive is called with the source
    THEN InvalidJsonError is raised.
    """
    # ``source`` and ``location`` are presumably parametrized fixtures; the
    # location is forwarded as the parser's current position.
    with pytest.raises(InvalidJsonError):
        primitive(source=source, current_location=location)
def is_outlier(x, check_finite=False, confidence=3):
    """Boolean mask with outliers

    Simply the element-wise logical negation of :func:`is_not_outlier`
    called with the same arguments.

    :param x: vector
    :param check_finite: forwarded to :func:`is_not_outlier`
    :param confidence: confidence level: 1, 2, 3 or 4, which correspond to
        90%, 95%, 99% and 99.9% two-tailed confidence respectively (normal
        distribution). Default: 3 (99%)
    :type x: numpy.ndarray
    :type check_finite: bool
    :type confidence: int
    :return: vector with condition "is `x` outlier?"
    """
    return np.logical_not(
        is_not_outlier(x, check_finite=check_finite, confidence=confidence))
def stateless_shuffle(value, seed):
    """Randomly shuffles a tensor, statelessly.

    Flattens ``value``, derives a permutation by argsorting stateless
    uniform noise keyed on ``seed``, applies it with gather, and restores
    the original shape. The same seed always produces the same shuffle.
    """
    flat_value = tf.reshape(value, [-1])
    indices = tf.argsort(
        tf.random.stateless_uniform(tf.shape(flat_value), seed=seed))
    flat_shuffle = tf.gather(flat_value, indices)
    return tf.reshape(flat_shuffle, tf.shape(value))
def is_one_line_function_declaration_line(line: str) -> bool:  # pylint:disable=invalid-name
    """
    Check if *line* contains a one-line function declaration.

    A declaration must contain ``def `` and ``(`` and close its header with
    either ``):`` (no return annotation) or ``) ->`` (return annotation).

    BUG FIX: the original expression parsed as
    ``('def ' in line and '(' in line and '):' in line) or ') ->' in line``
    because ``and`` binds tighter than ``or``, so any line containing
    ``) ->`` matched -- even without ``def``. The ``or`` is now
    parenthesized with the other two conditions required.
    """
    return 'def ' in line and '(' in line and ('):' in line or ') ->' in line)
def check_if_ended(id):
    """
    Check if the course has already ended.

    Looks up the course through the Moodle API and compares its end date
    against the current time.

    :param id: Id of the course that needs to be checked.
    :type id: int
    :return: If a course has ended
    :rtype: bool
    """
    course = moodle_api.get_course_by_id_field(id)
    end_timestamp = course['courses'][0]['enddate']
    end_date = dt.datetime.fromtimestamp(end_timestamp)
    return end_date < dt.datetime.today()
def get_all_applications(user, timeslot):
    """
    Get a users applications for this timeslot

    :param user: user to get applications for
    :param timeslot: timeslot to get the applications.
    :return: queryset of the user's applications whose proposal belongs to
        the given timeslot
    """
    return user.applications.filter(Proposal__TimeSlot=timeslot)
def get_console_url(args):
    """ Get a console login URL

    Exchanges (possibly role-assumed) credentials for a federation sign-in
    token, builds the console sign-in URL, and either returns it
    (``args.output`` truthy) or opens it in a web browser.

    :param args: parsed CLI arguments; ``args.output`` controls the output mode.
    :return: the sign-in URL when ``args.output`` is truthy, else None.
    :raises Exception: when the sign-in token request does not return 200.
    """
    # Get credentials, maybe assume the role
    session_creds = get_credentials(args)
    # build the token request and fetch the sign-in token
    url = request_signin_token(args, session_creds)
    r = requests.get(url,timeout=200.0)
    if r.status_code != 200:
        vprint('Error: Getting SigninToken', r.url)
        vprint(r.content)
        raise Exception(f'Bad response requesting signin token {r.reason}')
    sin_token = r.json()['SigninToken']
    # build the console signin url
    sin_url = request_console_login(sin_token)
    if args.output:
        return sin_url
    else:
        vprint(f'Opening webbrowser for {sin_url}')
        webbrowser.open(sin_url)
        return None
def get_uq_samples(config, campaign_dir, number_of_samples, skip=0):
    """
    Copies UQ ensemble results from the local FabSim directory to the local
    EasyVVUQ work directory. Does not fetch the results from the (remote)
    host. For this, use the fetch_results() subroutine.

    Parameters
    ----------
    config : string
        Name of the config directory.
    campaign_dir : string
        Name of the EasyVVUQ campaign directory (campaign.campaign_dir)
    number_of_samples : int
        The total number of samples in the ensemble.
    skip : int, optional
        Number of runs to skip. The default is 0. If skip=10, only run directories
        with name run_I, with I > 10, will be submitted to the machine. Used in adaptive
        sampling to avoid repeating already computed runs.

    Returns
    -------
    None.
    """
    # Loop through all result dirs to find the result dir for this config.
    # We assume the name of the directory with the run dirs STARTS with the
    # config name, e.g. <config_name>_eagle_vecma_28 and not
    # PJ_header_<config_name>_eagle_vecma_28.
    found = False
    dirs = os.listdir(env.local_results)
    for dir_i in dirs:
        if config == dir_i[0:len(config)]:
            found = True
            break
    if found:
        # Copy the entire result directory from the local FabSim results
        # directory back to the EasyVVUQ campaign directory.
        # BUG FIX: the original log message concatenated
        # ``dir_i + 'to' + campaign_dir`` without spaces, producing a
        # garbled "...<dir>to<campaign>" path in the output.
        print('Copying results from', env.local_results + '/' + dir_i + ' to ' + campaign_dir)
        ensemble2campaign(env.local_results + '/' + dir_i, campaign_dir, skip)
        # If the same FabSim3 config name was used before, the copy above may
        # have brought over more runs than EasyVVUQ currently uses. Remove
        # every run_X with X > number_of_samples from the EasyVVUQ campaign
        # dir (not from the FabSim results dir).
        dirs = os.listdir(path.join(campaign_dir, 'runs'))
        for dir_i in dirs:
            run_id = int(dir_i.split('_')[-1])
            if run_id > int(number_of_samples):
                local('rm -r %s/runs/run_%d' % (campaign_dir, run_id))
                print('Removing Run %d from %s/runs' % (run_id, campaign_dir))
    else:
        print('Campaign dir not found')
def test_factorise_numbers():
    """Only natural numbers > 1 are taken into process.

    Each expected dict maps prime factor -> multiplicity, covering primes,
    prime powers and composites with several distinct factors.
    """
    assert factorise(2) == {2: 1}
    assert factorise(3) == {3: 1}
    assert factorise(4) == {2: 2}
    assert factorise(10) == {2: 1, 5: 1}
    assert factorise(24) == {2: 3, 3: 1}
    assert factorise(30) == {2: 1, 3: 1, 5: 1}
    assert factorise(32) == {2: 5}
    assert factorise(41) == {41: 1}
    assert factorise(48) == {2: 4, 3: 1}
    assert factorise(60) == {2: 2, 3: 1, 5: 1}
async def test_abort_if_already_setup(hass):
    """Test we abort if component is already setup."""
    # Pre-register a config entry so the flow started below collides with it.
    MockConfigEntry(
        domain=DOMAIN,
        data=CONFIG_DATA,
    ).add_to_hass(hass)
    # Stub DNS resolution so the flow resolves the host without real network I/O.
    with patch(
        "homeassistant.components.asuswrt.config_flow.socket.gethostbyname",
        return_value=IP_ADDRESS,
    ):
        # Should fail, same HOST (flow)
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_USER},
            data=CONFIG_DATA,
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "single_instance_allowed"
# | 5,328,453 |
def store_to_db(timezone, config, data, auth_code):
    """Stores the data in the backend database with a HTTP POST request."""
    if data == {}:
        logging.error('Received no data, stopping')
        return
    # Stamp and canonicalise the observation before serialising it once.
    data['timestamp'] = get_timestamp(timezone)
    payload = OrderedDict(sorted(data.items()))
    serialized = json.dumps(payload)
    response = requests.post(
        config['upload_url'],
        params={'obs-string': serialized,
                'code': auth_code})
    logging.info("Weather observation request data: '%s', response: code %s, text '%s'",
                 serialized, response.status_code, response.text)
# | 5,328,454 |
def input_layer_from_space(space):
    """Build a tensorlayer input layer matching an environment space.

    :param space: a gym-style space (``Box`` or ``Discrete``)
    :return: a tensorlayer input layer
    :raises NotImplementedError: for any other space type
    """
    # Guard-clause chain: continuous spaces first, then discrete.
    if isinstance(space, Box):
        return input_layer(space.shape)
    if isinstance(space, Discrete):
        return tl.layers.Input(dtype=tf.int32, shape=(None, ))
    raise NotImplementedError
# | 5,328,455 |
async def test_invalid_host(hass):
    """Test the failure when invalid host provided."""
    # Simulate a Twinkly device that cannot be reached.
    client = ClientMock()
    client.is_offline = True
    with patch("twinkly_client.TwinklyClient", return_value=client):
        result = await hass.config_entries.flow.async_init(
            TWINKLY_DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        # The user form is shown first, with no errors yet.
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert result["errors"] == {}
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_ENTRY_HOST: "dummy"},
        )
        # The offline client keeps the flow on the user step with a connect error.
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert result["errors"] == {CONF_ENTRY_HOST: "cannot_connect"}
# | 5,328,456 |
def searchable_paths(env_vars=PATH_VARS):
    """
    Return a list of directories where to search "in the PATH" in the provided
    ``env_vars`` list of PATH-like environment variables.
    """
    # Collect stripped, non-empty path entries from every requested variable.
    candidates = []
    for variable in env_vars:
        raw = os.environ.get(variable, '') or ''
        candidates.extend(entry.strip() for entry in raw.split(os.pathsep))
    resolved = [os.path.realpath(entry) for entry in candidates if entry]
    # Keep only entries that actually exist as directories.
    return tuple(path for path in resolved if os.path.isdir(path))
# | 5,328,457 |
def _init_app():
    """Initialize and return the Dash application."""
    # Resolve the stylesheet relative to this module's directory.
    stylesheet = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "stylesheet.css")
    return dash.Dash(
        __name__,
        external_stylesheets=[stylesheet],
        suppress_callback_exceptions=True,
    )
# | 5,328,458 |
def test_get_jobs_status(
        globals, urls, client, mock_test_responses, context_fixture):
    """Context shows jobs status in dataframe with specific status."""
    context = context_fixture('Healthy')
    # First queued response for the upload task reports DONE ...
    mock_test_responses(task='upload', status=CoreStatus.DONE)
    # ... and the second queued response reports FAIL for the same task id.
    responses.add(
        responses.GET, urls('task', 'upload'),
        json={
            '_id': globals['upload'],
            'status': CoreStatus.FAIL
        },
        status=200,
        content_type='application/json')
    for i in range(2):
        client.upload(file=globals['test_csv_file'], name=str(i))
    context.run()
    # Filtering by 'fail' must surface only the second job (named '1').
    job_fail = context.get_jobs_status(status=['fail'])
    assert job_fail.iloc[0]['status'] == 'fail'
    assert job_fail.iloc[0]['Job'] == '1'
# | 5,328,459 |
def get_application_registry():
    """Return the application registry. If :func:`set_application_registry` was never
    invoked, return a registry built using :file:`defaults_en.txt` embedded in the pint
    package.

    :return: the module-level application :class:`UnitRegistry`.
    """
    # Simply exposes the module-level singleton maintained elsewhere.
    return _APP_REGISTRY
# | 5,328,460 |
def get_dataset(cfg, designation):
    """
    Return a Dataset for the given designation ('train', 'valid', 'test').
    """
    # The dataset module is resolved relative to this package by name.
    module_name = '.' + cfg['dataset']
    dataset_module = importlib.import_module(module_name, __package__)
    return dataset_module.create(cfg, designation)
# | 5,328,461 |
def invite(email, inviter, user=None, sendfn=send_invite, resend=True,
           **kwargs):
    """
    Invite a given email address.
    Returns a ``(User, sent)`` tuple similar to the Django
    :meth:`django.db.models.Manager.get_or_create` method.
    If a user is passed in, reinvite the user. For projects that support
    multiple users with the same email address, it is necessary to pass in the
    user to avoid throwing a MultipleObjectsReturned error.
    If a user with ``email`` address does not exist:
    * Creates a user object
    * Set ``user.email = email``
    * Set ``user.is_active = False``
    * Set a random password
    * Send the invitation email
    * Return ``(user, True)``
    If a user with ``email`` address exists and ``user.is_active == False``:
    * Re-send the invitation
    * Return ``(user, True)``
    If a user with ``email`` address exists:
    * Don't send the invitation
    * Return ``(user, False)``
    If the email address is blocked:
    * Don't send the invitation
    * Return ``(None, False)``
    To customize sending, pass in a new ``sendfn`` function as documented by
    :attr:`inviter2.utils.send_invite`:
    ::
        sendfn = lambda invitee, inviter, **kwargs: 1
        invite("foo@bar.com", request.user, sendfn = sendfn)
    :param email: The email address
    :param inviter: The user inviting the email address
    :param pk: The pk of an existing user to be reinvited.
    :param sendfn: An email sending function. Defaults to
        :attr:`inviter2.utils.send_invite`
    :param resend: Resend email to users that are not registered yet
    """
    # Opted-out addresses are never contacted.
    if OptOut.objects.is_blocked(email):
        return None, False
    try:
        if not user:
            user = User.objects.get(email=email)
        # Existing active accounts, or inactive ones with resend disabled,
        # get no email.
        if user.is_active:
            return user, False
        if not resend:
            return user, False
    except User.DoesNotExist:
        # No such user yet: create an inactive placeholder account. Custom
        # user models without a 'username' field are created by email only.
        username_field = getattr(User, 'USERNAME_FIELD', 'username')
        if username_field == 'username':
            user = create_inactive_user(email=email, username=uuid())
        else:
            user = create_inactive_user(email=email)
    # Build tokenised registration / opt-out URLs and send the invitation.
    url_parts = int_to_base36(user.id), token_generator.make_token(user)
    url = reverse('{}:register'.format(NAMESPACE), args=url_parts)
    opt_out_url = reverse('{}:opt-out'.format(NAMESPACE), args=url_parts)
    kwargs.update(opt_out_url=opt_out_url)
    sendfn(user, inviter, url=url, **kwargs)
    return user, True
# | 5,328,462 |
def RestrictDictValues(aDict, restrictSet):
    """Return a dict which has the mappings from the original dict only for values in the given set"""
    return {key: value for key, value in aDict.items() if value in restrictSet}
# | 5,328,463 |
def pump_impact(request):
    """
    Ajax controller that prepares and submits the new pump impact jobs and workflow.

    Reads tool parameters from ``request.POST``, then either cancels a running
    job (``cancel`` flag) or builds and submits one of three condor workflows
    (drawdown, stream depletion, or both). Returns a ``JsonResponse`` with
    ``success`` plus workflow ids on submission, or an error message.
    """
    session = None
    try:
        session_id = request.session.session_key
        resource_id = request.POST.get('resource_id')
        pumps = request.POST.get('data')
        tool = request.POST.get('tool')
        cancel_status = request.POST.get('cancel', '')
        stream_package = request.POST.get('package', '')
        # "_" is used as a placeholder when no stream package was supplied.
        if stream_package == "":
            stream_package = "_"
        layer = parseIntSet(request.POST.get('layer', '0'))
        stress_period = parseIntSet(request.POST.get('stress_period_output', '0'))
        data_input = request.POST.get('input')
        if not resource_id:
            return JsonResponse({'success': False, 'message': 'No resource ID given. Check URL for ID.'})
        Session = app.get_persistent_store_database('primary_db', as_sessionmaker=True)
        session = Session()
        resource = session.query(ModflowModelResource).get(resource_id)
        if cancel_status:
            # Cancellation path: look up the stored job id (waiting up to
            # max_wait_time for it to appear) and stop the running workflow.
            max_wait_time = 20
            job_id = get_job_id(resource_id, session, max_wait_time)
            job_manager = app.get_job_manager()
            running_job = job_manager.get_job(job_id)
            # Stop running job
            running_job.stop()
            # Clear job_id
            resource.set_attribute('job_id', '')
            session.commit()
            return JsonResponse({'success': True, 'message': 'Well Influence Tool has been cancelled.'})
        else:
            # Submission path: gather model georeference/version attributes.
            xll = resource.get_attribute('xll')
            yll = resource.get_attribute('yll')
            rotation = resource.get_attribute('rotation')
            model_units = resource.get_attribute('model_units')
            model_version = resource.get_attribute('model_version')
            srid = resource.get_attribute('srid')
            database_id = resource.get_attribute('database_id')
            # Writing geojson file for pumps to be passed to condor worker
            model_db = ModelFileDatabase(app=app, database_id=database_id)
            pump_json_file = os.path.join(model_db.directory, "well_impact.json")
            geojson = open(pump_json_file, "w")
            geojson.write(pumps + "\n")
            geojson.close()
            # setting up spatial manager to get model file list, and modflow executables
            gs_engine = app.get_spatial_dataset_service(app.GEOSERVER_NAME, as_engine=True)
            spatial_manager = ModflowSpatialManager(geoserver_engine=gs_engine,
                                                   model_file_db_connection=model_db.model_db_connection,
                                                   modflow_version=model_version)
            modflow_exe = os.path.join(spatial_manager.EXE_PATH, model_version)
            model_file_list = spatial_manager.model_file_db.list()
            # Loop through model file database for needed files
            wel_file = '_'
            hds_file = '_'
            # cbb_file = '_'
            nam_file = ''
            for file in model_file_list:
                # TODO: figure out .mfn problems
                if file.split(".")[-1] == 'nam':
                    nam_file = file
                if file.split(".")[-1] == 'hds':
                    hds_file = file
                if file.split(".")[-1] == 'wel':
                    wel_file = file
                # if file.split(".")[-1] == 'cbb':
                #     cbb_file = file
            user_workspace = app.get_user_workspace(request.user)
            user_workspace_path = user_workspace.path
            # Django validation. After Django2.0, is_authenticated is a property
            try:
                check_user = request.user.is_authenticated()
            except:  # noqa: E722
                check_user = request.user.is_authenticated
            # Used to get a valid Django anonymous user if not signed in
            if check_user:
                user = request.user
            else:
                user = get_anonymous_user()
            try:
                # Build the workflow matching the requested tool.
                if tool == 'drawdown':
                    job = DrawdownWorkflow(
                        user=user,
                        workspace=user_workspace_path,
                        session_id=session_id,
                        xll=xll,
                        yll=yll,
                        rotation=rotation,
                        db_dir=model_db.directory,
                        model_units=model_units,
                        model_version=model_version,
                        modflow_exe=modflow_exe,
                        nam_file=nam_file,
                        hds_file=hds_file,
                        wel_file=wel_file,
                        srid=srid,
                        resource_id=resource.id,
                        database_id=database_id,
                        app_package=app.package,
                        contour_levels=data_input,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                    )
                elif tool == 'stream_depletion':
                    job = StreamDepletionWorkflow(
                        user=user,
                        workspace=user_workspace_path,
                        session_id=session_id,
                        xll=xll,
                        yll=yll,
                        rotation=rotation,
                        db_dir=model_db.directory,
                        model_units=model_units,
                        model_version=model_version,
                        modflow_exe=modflow_exe,
                        nam_file=nam_file,
                        wel_file=wel_file,
                        srid=srid,
                        resource_id=resource.id,
                        database_id=database_id,
                        app_package=app.package,
                        stream_package=stream_package,
                        std_minimum_change=data_input,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                    )
                else:
                    # "Run all tools": input carries both values, ';'-separated.
                    data_input = data_input.split(";")
                    contour_levels = data_input[0]
                    std_minimum_change = data_input[1]
                    job = RunAllToolsWorkflow(
                        user=user,
                        workspace=user_workspace_path,
                        session_id=session_id,
                        xll=xll,
                        yll=yll,
                        rotation=rotation,
                        db_dir=model_db.directory,
                        model_units=model_units,
                        model_version=model_version,
                        modflow_exe=modflow_exe,
                        nam_file=nam_file,
                        wel_file=wel_file,
                        srid=srid,
                        resource_id=resource.id,
                        database_id=database_id,
                        app_package=app.package,
                        stream_package=stream_package,
                        std_minimum_change=std_minimum_change,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                        contour_levels=contour_levels,
                    )
                job.run_job()
                workflow_id = job.workflow.id
                remote_id = job.workflow.remote_id
                # Save job_id in resource so we can cancel it if necessary.
                resource.set_attribute('job_id', workflow_id)
                session.commit()
                return JsonResponse({'success': True, 'resource_id': resource_id, 'workflow_id': workflow_id,
                                     'remote_id': remote_id})
            except Exception as e:
                log.exception(str(e))
                return JsonResponse({'success': False,
                                     'message': 'An unexpected error has occurred.'
                                                ' Please contact Aquaveo and try again later.'
                                     })
    finally:
        # Close the DB session whether we returned early, succeeded or failed.
        session and session.close()
# | 5,328,464 |
def classify_images(images_dir, petlabel_dic, model):
    """
    Creates classifier labels with classifier function, compares labels, and
    creates a dictionary containing both labels and comparison of them to be
    returned.
    PLEASE NOTE: This function uses the classifier() function defined in
    classifier.py within this function. The proper use of this function is
    in test_classifier.py Please refer to this program prior to using the
    classifier() function to classify images in this function.
    Parameters:
     images_dir - The (full) path to the folder of images that are to be
                  classified by pretrained CNN models (string)
     petlabel_dic - Dictionary that contains the pet image(true) labels
                    that classify what's in the image, where its' key is the
                    pet image filename & it's value is pet image label where
                    label is lowercase with space between each word in label
     model - pretrained CNN whose architecture is indicated by this parameter,
             values must be: resnet alexnet vgg (string)
    Returns:
      results_dic - Dictionary with key as image filename and value as a List
             (index)idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int)  where 1 = match between pet image and
                    classifer labels and 0 = no match between labels
    """
    # Local import keeps the fix self-contained; os.path.join is separator-safe
    # whether or not images_dir ends with a trailing '/'.
    import os

    results = {}
    # loop through each image in the images_dir
    for image in listdir(images_dir):
        # get the pet label determined by the image filename (see get_pet_labels)
        pet_label = petlabel_dic[image]
        # BUG FIX: previous code referenced the undefined name `image_dir`
        # (the parameter is `images_dir`) and concatenated paths by hand.
        image_path = os.path.join(images_dir, image)
        # classify the image using the model, leveraging the prebuilt classifier
        classified_label = classifier(image_path, model)
        # classifier() may return several comma-separated terms for one label;
        # a match is counted when any term equals the pet label. Terms are
        # stripped so ", dog" still matches "dog", and compared case-insensitively.
        terms = (term.strip().lower() for term in classified_label.split(","))
        match = 1 if pet_label.lower() in terms else 0
        results[image] = [pet_label, classified_label, match]
    return results
# | 5,328,465 |
def create_redis_fixture(scope="function"):
    """Produce a Redis fixture.

    Any number of fixture functions can be created. Under the hood they will all share the same
    database server.

    Args:
        scope (str): The scope of the fixture can be specified by the user, defaults to "function".

    Raises:
        KeyError: If any additional arguments are provided to the function than what is necessary.
    """
    @pytest.fixture(scope=scope)
    def _(_redis_container, pmr_redis_config):
        # Fresh client per fixture use; flush so every test starts empty.
        db = redis.Redis(host=pmr_redis_config.host, port=pmr_redis_config.port)
        db.flushall()
        # Attach connection metadata so dependent tests can introspect it.
        assign_fixture_credentials(
            db,
            drivername="redis",
            host=pmr_redis_config.host,
            port=pmr_redis_config.port,
            database=None,
            username=None,
            password=None,
        )
        return db
    return _
# | 5,328,466 |
def get_heroes(**kwargs):
    """
    Get a list of hero identifiers
    """
    # The heroes endpoint lives under the DOTA2 econ API base URL.
    api_base = "http://api.steampowered.com/IEconDOTA2_570/"
    return make_request("GetHeroes", base=api_base, **kwargs)
# | 5,328,467 |
def print_table(file_scores, global_scores, output_filename,
                n_digits=2, table_format='simple'):
    """Pretty print scores as table.

    Parameters
    ----------
    file_scores : iterable
        Per-file ``Scores`` instances (namedtuple-like, first field ``file_id``).
    global_scores : Scores
        Global scores, appended as the final '*** OVERALL ***' row.
    output_filename : str or None
        If truthy, also write the table to ``/score/<output_filename>``.
    n_digits : int, optional
        Number of decimal digits to display.
        (Default: 2)
    table_format : str, optional
        Table format. Passed to ``tabulate.tabulate``.
        (Default: 'simple')
    """
    # col_names = ['File',
    #              'DER', # Diarization error rate.
    #              'JER', # Jaccard error rate.
    #              'B3-Precision', # B-cubed precision.
    #              'B3-Recall', # B-cubed recall.
    #              'B3-F1', # B-cubed F1.
    #              'GKT(ref, sys)', # Goodman-Krustal tau (ref, sys).
    #              'GKT(sys, ref)', # Goodman-Kruskal tau (sys, ref).
    #              'H(ref|sys)', # Conditional entropy of ref given sys.
    #              'H(sys|ref)', # Conditional entropy of sys given ref.
    #              'MI', # Mutual information.
    #              'NMI', # Normalized mutual information.
    #              ]
    col_names = ['File',
                 'DER', # Diarization error rate.
                 'Missed',
                 'FA',
                 'Confusion'
                 ]
    # Per-file rows sorted by file id; overall row appended last.
    rows = sorted(file_scores, key=lambda x: x.file_id)
    rows.append(global_scores._replace(file_id='*** OVERALL ***'))
    floatfmt = '.%df' % n_digits
    tbl = tabulate(
        rows, headers=col_names, floatfmt=floatfmt, tablefmt=table_format)
    if output_filename:
        os.makedirs('/score', exist_ok=True)
        with open(os.path.join('/score', output_filename), 'w') as outfile:
            print(tbl, file = outfile)
    print(tbl)
# | 5,328,468 |
def gt_strategy(
    pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
    strategy: Optional[SearchStrategy] = None,
    *,
    min_value: Union[int, float],
) -> SearchStrategy:
    """Strategy to generate values strictly greater than ``min_value``.

    :param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
    :param strategy: an optional hypothesis strategy. If specified, the
        pandas dtype strategy will be chained onto this strategy.
    :param min_value: generate values larger than this.
    :returns: ``hypothesis`` strategy
    """
    if strategy is None:
        # Floats exclude the bound itself; for other dtypes leave it unset.
        exclude_min = is_float(pandera_dtype) or None
        strategy = pandas_dtype_strategy(
            pandera_dtype,
            min_value=min_value,
            exclude_min=exclude_min,
        )
    # Filter guards against any generated value equal to the bound.
    return strategy.filter(lambda value: value > min_value)
# | 5,328,469 |
def get_smoker_status(observation):
    """Does `observation` represent a survey response indicating that the patient is or was a smoker."""
    try:
        codings = observation['valueCodeableConcept']['coding']
        # SNOMED 8517006 = Former smoker, 449868002 = Every day smoker.
        return any(
            'system' in coding and 'code' in coding
            and coding['system'] == utils.SNOMED_SYSTEM
            and coding['code'] in ('8517006', '449868002')
            for coding in codings
        )
    except KeyError:
        # Missing valueCodeableConcept/coding structure means "not a smoker".
        return False
# | 5,328,470 |
def model_input_data_api():
    """Returns records of the data used for the model."""
    # Parse and clamp the hours query parameter to [1, API_MAX_HOURS].
    hours = request.args.get('hours', default=24, type=int)
    hours = max(1, min(hours, current_app.config['API_MAX_HOURS']))
    frame = execute_sql('''SELECT * FROM processed_data ORDER BY time''')
    records = frame.tail(n=hours).to_dict(orient='records')
    return jsonify(model_input_data=records)
# | 5,328,471 |
def GetIAP(args, messages, existing_iap_settings=None):
    """Returns IAP settings from arguments.

    Args:
        args: parsed arguments; ``args.iap`` behaves like a dict of IAP subargs
            ('enabled', 'disabled', 'oauth2-client-id', 'oauth2-client-secret').
        messages: compute API messages module providing BackendServiceIAP.
        existing_iap_settings: current BackendServiceIAP whose enabled flag is
            inherited when neither [enabled] nor [disabled] is specified.

    Returns:
        A populated messages.BackendServiceIAP.

    Raises:
        exceptions.InvalidArgumentException: if both [enabled] and [disabled]
            are given, or only one of the OAuth2 client id/secret pair is set.
    """
    if 'enabled' in args.iap and 'disabled' in args.iap:
        raise exceptions.InvalidArgumentException(
            '--iap', 'Must specify only one of [enabled] or [disabled]')
    iap_settings = messages.BackendServiceIAP()
    if 'enabled' in args.iap:
        iap_settings.enabled = True
    elif 'disabled' in args.iap:
        iap_settings.enabled = False
    elif existing_iap_settings is not None:
        # Neither flag given: keep whatever is currently configured.
        iap_settings.enabled = existing_iap_settings.enabled
    if iap_settings.enabled:
        # If either oauth2-client-id or oauth2-client-secret is specified,
        # then the other should also be specified.
        if 'oauth2-client-id' in args.iap or 'oauth2-client-secret' in args.iap:
            iap_settings.oauth2ClientId = args.iap.get('oauth2-client-id')
            iap_settings.oauth2ClientSecret = args.iap.get('oauth2-client-secret')
            if not iap_settings.oauth2ClientId or not iap_settings.oauth2ClientSecret:
                raise exceptions.InvalidArgumentException(
                    '--iap',
                    'Both [oauth2-client-id] and [oauth2-client-secret] must be '
                    'specified together')
    return iap_settings
# | 5,328,472 |
def test_get_metric_one(client: LNMetricsClient) -> None:
    """Get the metrics from one node"""
    # Pick the first known bitcoin node and query a 4-hour window ending
    # at its last update (window start snapped to the hour).
    nodes = client.get_nodes(network="bitcoin")
    first_node = nodes[0]
    last_update = datetime.fromtimestamp(first_node["last_update"])
    window_start = last_update.replace(
        minute=0, second=0, microsecond=0) - timedelta(hours=4)
    metrics = client.get_metric_one(
        network="bitcoin",
        node_id=first_node["node_id"],
        first=window_start.timestamp(),
        last=last_update.timestamp(),
    )
    logging.debug(metrics)
    assert metrics is not None
# | 5,328,473 |
def get_default_instance():
    """Return the default VLC.Instance.

    Lazily creates the singleton on first call and caches it in the
    module-level ``_default_instance``.
    """
    global _default_instance
    if _default_instance is None:
        _default_instance = Instance()
    return _default_instance
# | 5,328,474 |
def __check_legacy_point_coordinates(updater: DocumentUpdater):
    """
    Check if all array values in field has legacy geo point
    coordinates type. Raise InconsistencyError if other arrays was found
    :param updater: document updater driving either a bulk query (by_path)
        or per-document (by_doc) traversal
    :return:
    """
    def by_path(ctx: ByPathContext):
        # Bulk check: any non-null value that IS an array but does not have
        # exactly 2 elements violates the legacy [x, y] point shape.
        fltr = {"$and": [
            {ctx.filter_dotpath: {"$ne": None}},
            *[{k: v} for k, v in ctx.extra_filter.items()],
            # $expr >= 3.6, $isArray >= 3.2
            {"$expr": {"$eq": [{"$isArray": f"${ctx.filter_dotpath}"}, True]}},
            {"$expr": {"$ne": [{"$size": f"${ctx.filter_dotpath}"}, 2]}},  # $expr >= 3.6
            # TODO: add element type check
        ]}
        check_empty_result(ctx.collection, ctx.filter_dotpath, fltr)
    def by_doc(ctx: ByDocContext):
        # Per-document check: None is allowed; otherwise require a
        # 2-element list/tuple.
        doc = ctx.document
        if updater.field_name in doc:
            f = doc[updater.field_name]
            valid = f is None or (isinstance(f, (list, tuple)) and len(f) == 2)
            if not valid:
                raise InconsistencyError(f"Field {updater.field_name} has wrong value {f!r} "
                                         f"(should be legacy geo point) in record {doc}")
    updater.update_combined(by_path, by_doc, False, False)
# | 5,328,475 |
def test_tabulation():
    """
    To validate the scanner considers "\t" as a single character, but double space.
    """
    s = "let\t:\n"
    scan = Scanner(s)
    for i, e in enumerate(scan):
        if i < 3:
            # Plain characters 'l', 'e', 't': offset tracks the index directly.
            assert e == s[i]
            assert scan.lineno == 0
            assert scan.offset == i
            assert f"{scan!s}" == f"in line 0 column {i}"
            assert f"{scan!r}" == f"in line 0 column {i}:\n\t\"let  :\"\n\t{' ' * (i + 1)}^"
            assert scan.line == 'let  :'
        elif i == 3:
            # First of the two spaces the tab expands to.
            assert e == " "
            assert scan.lineno == 0
            assert scan.offset == 3
            assert f"{scan!s}" == f"in line 0 column {3}"
            assert f"{scan!r}" == f"in line 0 column {3}:\n\t\"let  :\"\n\t{' ' * 4}^"
            assert scan.line == 'let  :'
        elif i == 4:
            # Second expanded space: offset stays at the tab's column (3).
            assert e == " "
            assert scan.lineno == 0
            assert scan.offset == 3
            assert f"{scan!s}" == f"in line 0 column {3}"
            assert f"{scan!r}" == f"in line 0 column {3}:\n\t\"let  :\"\n\t{' ' * 4}^"
            assert scan.line == 'let  :'
# | 5,328,476 |
def rules_yq_toolchains(version = YQ_DEFAULT_VERSION):
    """Register yq binary that specified version for all platforms as toolchains.

    Args:
        version: yq release version to register; must have an entry in YQ_BINDIST.
    """
    if not YQ_BINDIST.get(version):
        fail("Binary distribution of yq {} is not available.".format(version))
    # Register one toolchain per (os, checksum) pair shipped for this version.
    for os, checksum in YQ_BINDIST.get(version).items():
        register_yq_toolchain(version = version, os = os, checksum = checksum)
# | 5,328,477 |
async def test_set_invalid_speed(hass, calls):
    """Test set invalid speed when fan has valid speed."""
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's speed to high
    await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH)
    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
    # Set fan's speed to 'invalid'
    await common.async_set_speed(hass, _TEST_FAN, "invalid")
    # verify speed is unchanged (invalid values must be ignored, not applied)
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
# | 5,328,478 |
def flatList(items):
    """Yield items from any nested iterable; see Reference."""
    for element in items:
        # Strings/bytes are iterable but treated as atomic leaf values.
        if isinstance(element, (str, bytes)) or not isinstance(element, Iterable):
            yield element
        else:
            yield from flatList(element)
# | 5,328,479 |
def dirty_multi_node_expand(node, precision, mem_map=None, fma=True):
    """ Dirty expand node into Hi and Lo part, storing
        already processed temporary values in mem_map

    :param node: operation graph node (Constant, Addition or Multiplication)
    :param precision: target scalar format for each component
    :param mem_map: memoization dict mapping node -> (hi, lo) pair
    :param fma: forward fused-multiply-add availability to the Mul metamacros
    :return: (hi, lo) tuple; lo may be None when the value fits exactly in hi
    """
    mem_map = mem_map or {}
    if node in mem_map:
        return mem_map[node]
    elif isinstance(node, Constant):
        # Split the constant into a rounded hi part and the rounding residue.
        value = node.get_value()
        value_hi = sollya.round(value, precision.sollya_object, sollya.RN)
        value_lo = sollya.round(value - value_hi, precision.sollya_object, sollya.RN)
        ch = Constant(value_hi,
                      tag=node.get_tag() + "hi",
                      precision=precision)
        cl = Constant(value_lo,
                      tag=node.get_tag() + "lo",
                      precision=precision
                      ) if value_lo != 0 else None
        if cl is None:
            Log.report(Log.Info, "simplified constant")
        result = ch, cl
        mem_map[node] = result
        return result
    else:
        # Case of Addition or Multiplication nodes:
        # 1. retrieve inputs
        # 2. dirty convert inputs recursively
        # 3. forward to the right metamacro
        assert isinstance(node, Addition) or isinstance(node, Multiplication)
        lhs = node.get_input(0)
        rhs = node.get_input(1)
        op1h, op1l = dirty_multi_node_expand(lhs, precision, mem_map, fma)
        op2h, op2l = dirty_multi_node_expand(rhs, precision, mem_map, fma)
        if isinstance(node, Addition):
            # Pick AddNNN variant by how many operands carry a lo component
            # (222 = both double-word, 212 = one double-word, 211 = none).
            result = Add222(op1h, op1l, op2h, op2l) \
                     if op1l is not None and op2l is not None \
                     else Add212(op1h, op2h, op2l) \
                     if op1l is None and op2l is not None \
                     else Add212(op2h, op1h, op1l) \
                     if op2l is None and op1l is not None \
                     else Add211(op1h, op2h)
            mem_map[node] = result
            return result
        elif isinstance(node, Multiplication):
            # Same variant selection for multiplication metamacros.
            result = Mul222(op1h, op1l, op2h, op2l, fma=fma) \
                     if op1l is not None and op2l is not None \
                     else Mul212(op1h, op2h, op2l, fma=fma) \
                     if op1l is None and op2l is not None \
                     else Mul212(op2h, op1h, op1l, fma=fma) \
                     if op2l is None and op1l is not None \
                     else Mul211(op1h, op2h, fma=fma)
            mem_map[node] = result
            return result
# | 5,328,480 |
def update_interface_device_hostname(apps, schema_editor):
    """Update all interfaces with hostname from associated device"""
    # Use the historical model so this migration works at any schema state.
    Device = apps.get_model('nsot', 'Device')
    for device in Device.objects.iterator():
        device.interfaces.update(device_hostname=device.hostname)
# | 5,328,481 |
def create_princess_df(spark_session) -> DataFrame:
    """Return a valid DF of disney princesses.

    Builds the frame by round-tripping the fixture through JSON so Spark
    infers the nested ``items`` struct schema automatically.
    """
    princesses = [
        {
            "name": "Cinderella",
            "age": 16,
            "happy": False,
            "items": {"weakness": "thorns", "created": "2020-10-14"},
        },
        {
            "name": "Snow white",
            "age": 17,
            "happy": True,
            "items": {"weakness": "apple", "created": "2020-10-14"},
        },
        {
            "name": "Belle",
            "age": 18,
            "happy": False,
            "items": {"weakness": "roses", "created": "2020-10-14"},
        },
        {
            "name": "Jasmine",
            "age": 19,
            "happy": True,
            "items": {"weakness": "jafar", "created": "2020-10-14"},
        },
    ]
    # Fix the column order explicitly; JSON ingestion sorts fields otherwise.
    return (
        spark_session.read.option("multiline", "true")
        .json(spark_session.sparkContext.parallelize([json.dumps(princesses)]))
        .select("name", "age", "happy", "items")
    )
# | 5,328,482 |
def get_sheet_names(file_path):
    """
    Return the sheet names of a spreadsheet file.

    For Excel files this is the workbook's full list of sheet names; for a
    CSV (which has no sheets) a one-element list containing the file name is
    returned so callers always get the same shape.

    :param file_path: path to a ``.csv`` or Excel file
    :return: list of sheet names (or ``[<file name>]`` for a CSV)
    """
    # A CSV has no sheets: mirror the Excel return shape with the file name.
    # (Suffix comparison is case-insensitive so ".CSV" works too.)
    if Path(file_path).suffix.lower() == ".csv":
        return [Path(file_path).name]
    return pd.ExcelFile(file_path).sheet_names
# | 5,328,483 |
def build_config(ctx, params):
    """Load configuration, load modules and install dependencies.

    This function loads the configuration and install all necessary
    dependencies defined on a `requirements.txt` file inside the module.
    If the flag `--verbose` is passed the logging level will be set as debug and
    all logs will be shown to the user.

    Args:
        ctx (:obj:`click.Context`): The current click cli context.
        params (dict): a dictionary of all parameters pass to the click
            context when invoking this function as a callback.

    Returns:
        int: the exit code. Always returns 0 in this case.
    """
    click.echo("Opsdroid will build modules from config.")
    path = params.get("path")
    # NOTE(review): this suppresses *every* exception from the whole build,
    # including config errors -- presumably intentional best-effort behaviour,
    # but worth confirming; failures are silent apart from the missing
    # SUCCESS message below.
    with contextlib.suppress(Exception):
        check_dependencies()
        config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)
        if params["verbose"]:
            config["logging"] = {"level": "debug"}
            configure_logging(config["logging"])
        with OpsDroid(config=config) as opsdroid:
            opsdroid.loader.load_modules_from_config(config)
            click.echo(click.style("SUCCESS:", bg="green", bold=True), nl=False)
            click.echo(" Opsdroid modules successfully built from config.")
# | 5,328,484 |
def test_move_scss_010(temp_builds_dir):
    """
    'Move' event on main sample
    """
    basedir = temp_builds_dir.join('watcher_move_scss_010')
    bdir, inspector, settings_object, watcher_opts = start_env(basedir)
    build_scss_sample_structure(settings_object, basedir)
    # Init handler
    project_handler = UnitTestableProjectEventHandler(
        settings_object,
        inspector,
        **watcher_opts
    )
    # Simulate a filesystem 'moved' event on the main source file.
    project_handler.on_moved(DummyMoveEvent(bdir('sass/main.scss')))
    # Both the moved file and its importer must have been recompiled.
    results = os.listdir(basedir.join("css").strpath)
    results.sort()
    assert results == [
        'main.css',
        'main_importing.css'
    ]
# | 5,328,485 |
async def test_advanced_option_flow(hass):
    """Test advanced config flow options."""
    controller = await setup_unifi_integration(
        hass, clients_response=CLIENTS, wlans_response=WLANS
    )
    # Advanced mode exposes the full three-step options flow.
    result = await hass.config_entries.options.async_init(
        controller.config_entry.entry_id, context={"show_advanced_options": True}
    )
    # Step 1: device tracker options.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "device_tracker"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            CONF_TRACK_CLIENTS: False,
            CONF_TRACK_WIRED_CLIENTS: False,
            CONF_TRACK_DEVICES: False,
            CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT"],
            CONF_DETECTION_TIME: 100,
        },
    )
    # Step 2: client control options.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "client_control"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]], CONF_POE_CLIENTS: False},
    )
    # Step 3: statistics sensors, after which the entry is created.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "statistics_sensors"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_ALLOW_BANDWIDTH_SENSORS: True}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_WIRED_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
        CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT"],
        CONF_DETECTION_TIME: 100,
        CONF_IGNORE_WIRED_BUG: False,
        CONF_POE_CLIENTS: False,
        CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
        CONF_ALLOW_BANDWIDTH_SENSORS: True,
    }
# | 5,328,486 |
def count_votes(votation_id):
    """
    Count the number of distinct vote keys for a votation; its purpose is
    to compare against the number of voters.
    """
    query = db.session.query(Vote.vote_key).filter(Vote.votation_id == votation_id)
    return query.distinct().count()
# | 5,328,487 |
def test_ca1dmodel():
    """ ConstantAcceleration Transition Model test """
    # 1D state: position, velocity, acceleration.
    initial_state = np.array([[3.0], [1.0], [0.1]])
    diffusion_coeffs = np.array([[0.01]])
    base(initial_state, diffusion_coeffs)
# | 5,328,488 |
def update_ammo(ammo_label):
    """ Updates the label that keeps track of the ammunition """
    # Show remaining/full ammunition for the currently selected aim mode.
    remaining, total = ammo[aim_mode], full_ammo[aim_mode]
    ammo_label.set_text("{}/{}".format(remaining, total))
# | 5,328,489 |
def get_eng_cv_rate(low_prob):
    """Build the 'low'/'high' student-to-English-I conversion-rate table.

    Simulated data for class enrollment: the 'high' rate is drawn uniformly
    from [low_prob, 1.25 * low_prob) with a fixed seed, so repeated calls
    are deterministic.

    Args:
        low_prob (float): low end of probability.

    Returns:
        dict: {'low': low_prob, 'high': sampled rate}; also stored in the
        module-level ``eng_cv_rate_dict``.
    """
    global eng_cv_rate_dict
    # Fixed seed keeps the simulated draw reproducible across calls.
    np.random.seed(123)
    upper_bound = 1.25 * low_prob
    high_rate = np.random.uniform(low=low_prob, high=upper_bound)
    eng_cv_rate_dict = {'low': low_prob, 'high': high_rate}
    return eng_cv_rate_dict
# | 5,328,490 |
def dev_unify_nest(args: Type[MultiDev], kwargs: Type[MultiDev], dev, mode, axis=0, max_depth=1):
    """
    Unify the input nested arguments, which consist of sub-arrays spread across arbitrary devices, to unified arrays
    on the single target device.

    :param args: The nested positional arguments to unify.
    :type args: MultiDev
    :param kwargs: The nested keyword arguments to unify.
    :type kwargs: MultiDev
    :param dev: The device to unify the nested arguments to.
    :type dev: Device
    :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
    :type mode: str
    :param axis: The axis along which to concattenate the sub-arrays. Default is 0.
    :type axis: int, optional
    :param max_depth: The maximum nested depth to reach. Default is 1. Increase this if the nest is deeper.
    :type max_depth: int, optional
    :return: nested arguments unified to the target device
    """
    # Unwrap MultiDevIter containers to their raw nested data before mapping.
    args = args._data if isinstance(args, MultiDevIter) else args
    kwargs = kwargs._data if isinstance(kwargs, MultiDevIter) else kwargs
    # Apply per-leaf unification across both nests.
    args_uni = ivy.nested_map(args, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
    kwargs_uni = ivy.nested_map(kwargs, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
    return args_uni, kwargs_uni
# | 5,328,491 |
def make_preprocessor(transforms=None, device_put=False):
    """Build an observation-preprocessing function.

    :param transforms: optional callable, or list/tuple of callables, applied
        to the observation in order before array conversion.
    :param device_put: if True, commit the resulting array to the default
        jax device via ``jax.device_put``.
    :return: function mapping a raw observation to an array of rank >= 1.
    :raises ValueError: if any element of ``transforms`` is not callable.
    """
    # verify input
    if transforms is not None:
        # BUG FIX: `(transforms)` is not a tuple -- a bare callable then
        # crashed the validation loop below. `(transforms,)` wraps it.
        if not isinstance(transforms, (list, tuple)):
            transforms = (transforms,)
        for fn in transforms:
            if not callable(fn):
                raise ValueError("Each element of transforms must be callable")

    def preprocess(obs):
        # apply custom transforms first, in the given order
        if transforms:
            for fn in transforms:
                obs = fn(obs)
        # promote scalars to rank-1 arrays so downstream code can index/batch
        if isinstance(obs, (int, float)):
            return jnp.array(obs).reshape((1,))
        if not obs.shape:
            return obs.reshape((1,))
        # put array to device if flag is set
        if device_put:
            obs = jax.device_put(obs)
        return obs

    return preprocess
# | 5,328,492 |
def validate_sintel(args, model, iters=50):
    """ Evaluate trained model on Sintel(train) clean + final passes

    Runs inference over every sample of the MPI-Sintel training split for both
    the 'clean' and 'final' render passes and prints the mean end-point error
    (EPE) and mean total variation (TV) of the predicted flow.

    @param args: namespace; at least `save_images` and `log_dir` are read here
    @param model: flow network wrapped in DataParallel (`.module` is called)
    @param iters: number of refinement iterations passed to the model
    """
    model.eval()
    # Height padding (2px top + bottom, replicate mode) applied before the
    # forward pass — presumably so H suits the network's stride; TODO confirm.
    pad = 2
    for dstype in ['clean', 'final']:
        val_dataset = datasets.MpiSintel(args, do_augument=False, dstype=dstype)
        epe_list = []
        tv_list = []
        for i in tqdm(range(len(val_dataset))):
            image1, image2, flow_gt, _ = val_dataset[i]
            # add batch dimension and move frames to GPU
            image1 = image1[None].cuda()
            image2 = image2[None].cuda()
            # F.pad spec is (W_left, W_right, H_top, H_bottom): width untouched,
            # height padded by `pad` on each side
            image1 = F.pad(image1, [0, 0, pad, pad], mode='replicate')
            image2 = F.pad(image2, [0, 0, pad, pad], mode='replicate')
            with torch.no_grad():
                flow_predictions, aux_vars, dlta_flows = model.module(image1, image2, iters=iters)
            # final iteration's flow for the single batch item, with the
            # padded height rows stripped off again
            flow_pr = flow_predictions[-1][0,:,pad:-pad]
            # per-pixel end-point error, then averaged over the frame
            epe = torch.sum((flow_pr - flow_gt.cuda())**2, dim=0)
            epe = torch.sqrt(epe).mean()
            tv = total_variation(flow_pr[None])
            epe_list.append(epe.item())
            tv_list.append(tv.sum(dim=1).mean().item())
            # optionally dump a side-by-side visualization every SAVE_FREQ samples
            if args.save_images and i % SAVE_FREQ == 0:
                display(image1[0,:,pad:-pad], image2[0,:,pad:-pad], flow_pr, flow_gt, os.path.join(args.log_dir, dstype + "_{}__epe_{:.2f}__tv_{:.2f}.png".format(i,epe.item(),tv.sum(dim=1).mean().item())))
            #display_flow_iterations(flow_predictions, os.path.join(args.log_dir, dstype + "_{}_flows.png".format(i)))
            #display_flow_iterations(q_predictions, os.path.join(args.log_dir, dstype + "_{}_post_admm.png".format(i)))
            #display_flow_iterations(dlta_flows, os.path.join(args.log_dir, dstype + "_{}_dlta_flows.png".format(i)))
            #display_delta_flow_norms(dlta_flows, os.path.join(args.log_dir, dstype + "_{}_norms.png".format(i)))
        print("Validation (%s) EPE: %.2f TV: %.2f" % (dstype, np.mean(epe_list), np.mean(tv_list))) | 5,328,493 |
def timeRangeContainsRange(event1Start, event2Start, event1End, event2End):
    """
    Returns true if one event's time range starts and ends
    within the other event's time range (containment either way;
    identical ranges count as contained).
    @param event1Start: datetime
    @param event2Start: datetime
    @param event1End: datetime
    @param event2End: datetime
    @return: boolean
    """
    # Idiom fix: return the boolean expression directly instead of the
    # `if cond: return True / else: return False` ladder.
    return ((event2Start <= event1Start and event2End >= event1End) or
            (event1Start <= event2Start and event1End >= event2End))
def compute_confidence_intervals(x: np.array, z: float = 1.96) -> float:
    """
    Half-width of the confidence interval for the mean of sample *x*.

    Hazra, Avijit. "Using the confidence interval confidently."
    Journal of thoracic disease 9.10 (2017): 4125.

    Formula: CI = x̅ ± z × (std/√n); this function returns the
    ``z × (std/√n)`` term, where
        x̅:   sample mean
        z:   z statistic for the desired confidence level (default 1.96 ≈ 95%)
        std: sample standard deviation (``numpy`` population std)
        n:   sample size
    """
    sample_size = len(x)
    standard_error = x.std() / sample_size ** 0.5
    return z * standard_error
def _deinitialized( ):
    """
    Tear-down: remove the custom properties this addon registered on
    bpy.types.Camera, in the same order as the original del statements.
    """
    camera_type = bpy.types.Camera
    for prop_name in (
        "tilt_shift_vertical",
        "tilt_shift_horizontal",
        "temp_lens",
        "temp_shift_x",
        "temp_shift_y",
    ):
        # `delattr(cls, name)` is equivalent to `del cls.name`
        delattr(camera_type, prop_name)
def SendCommands(cmds, key):
    """Send commands to the running instance of Editra
    @param cmds: List of command strings
    @param key: Server session authentication key
    @return: bool - True if the message was sent, False otherwise

    """
    if not cmds:
        # Fix: docstring promises a bool; previously this returned None
        return False

    # Build the full message locally instead of mutating the caller's list:
    # auth key first, then the commands, then the end-of-message clause.
    message = [key] + list(cmds) + [MSGEND]

    client = None
    try:
        # Setup the client socket
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', EDPORT))
        # Server expects commands delimited by ;
        # NOTE(review): sends a unicode string; presumably Python 2 era code —
        # on Python 3 this would need .encode(); confirm target runtime.
        client.send(u";".join(message))
        client.shutdown(socket.SHUT_RDWR)
    except socket.error:
        # Narrowed from a bare `except:`; only socket failures mean "not sent"
        return False
    else:
        return True
    finally:
        # Fix: the socket previously leaked when connect/send/shutdown raised
        if client is not None:
            client.close()
def setmanager(domain_name, user_name='manager'):
    """ Make a user manager of a domain
    """
    address = user_name + '@' + domain_name
    target_domain = models.Domain.query.get(domain_name)
    # same query order as before: domain first, then the user record
    target_domain.managers.append(models.User.query.get(address))
    db.session.add(target_domain)
    db.session.commit()
def mock_legacy_venv(venv_name: str, metadata_version: Optional[str] = None) -> None:
    """Convert a venv installed with the most recent pipx to look like
    one with a previous metadata version.
    metadata_version=None refers to no metadata file (pipx pre-0.15.0.0)

    :param venv_name: name of an existing venv under PIPX_LOCAL_VENVS
    :param metadata_version: "0.2" (no-op), "0.1" (downgrade metadata),
        or None (remove the metadata file entirely)
    :raises Exception: on an unrecognized metadata_version
    """
    venv_dir = Path(constants.PIPX_LOCAL_VENVS) / canonicalize_name(venv_name)

    if metadata_version == "0.2":
        # Current metadata version, do nothing
        return
    elif metadata_version == "0.1":
        mock_pipx_metadata_template = MOCK_PIPXMETADATA_0_1
    elif metadata_version is None:
        # No metadata
        os.remove(venv_dir / "pipx_metadata.json")
        return
    else:
        raise Exception(
            f"Internal Test Error: Unknown metadata_version={metadata_version}"
        )

    modern_metadata = pipx_metadata_file.PipxMetadata(venv_dir).to_dict()

    # Convert to mock old metadata
    mock_pipx_metadata = {}
    for key in mock_pipx_metadata_template:
        if key == "main_package":
            mock_pipx_metadata[key] = _mock_legacy_package_info(
                modern_metadata[key], metadata_version=metadata_version
            )
        elif key == "injected_packages":
            # BUG FIX: this was a separate `if`; its `else` branch then ran for
            # key == "main_package" too, clobbering the freshly mocked entry
            # with the unmocked modern value.
            mock_pipx_metadata[key] = {}
            for injected in modern_metadata[key]:
                mock_pipx_metadata[key][injected] = _mock_legacy_package_info(
                    modern_metadata[key][injected], metadata_version=metadata_version
                )
        else:
            mock_pipx_metadata[key] = modern_metadata[key]
    mock_pipx_metadata["pipx_metadata_version"] = mock_pipx_metadata_template[
        "pipx_metadata_version"
    ]

    # replicate pipx_metadata_file.PipxMetadata.write()
    with open(venv_dir / "pipx_metadata.json", "w") as pipx_metadata_fh:
        json.dump(
            mock_pipx_metadata,
            pipx_metadata_fh,
            indent=4,
            sort_keys=True,
            cls=pipx_metadata_file.JsonEncoderHandlesPath,
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.