content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import requests
import json
def create_secret(api_url, token, scope, secret_name, secret_value):
    """Create (or overwrite) a secret in a Databricks workspace scope.

    Any existing secret with the same name in the scope is overwritten.

    :param api_url: base URL of the Databricks REST API (trailing slash expected)
    :param token: bearer token used for authorization
    :param scope: secret scope to write into
    :param secret_name: key of the secret
    :param secret_value: string value to store
    :returns: parsed JSON response body
    :raises Exception: if the API responds with a non-200 status code
    """
    r = requests.post(api_url + 'preview/secret/secrets/write',
                      headers={"Authorization": "Bearer " + token},
                      json={"scope": scope, "key": secret_name,
                            "string_value": secret_value})
    response_body = r.json()
    if r.status_code != 200:
        # Fixed message: this endpoint writes a secret, not a scope.
        raise Exception('Error creating secret: ' + json.dumps(response_body))
    return response_body
def autorange_xy(img, axx, axy, data, xy_limits, xy_pad):
    """ Adjust axx and axy vertical range.
    xy_limits:
        None or "auto" # matplotlib default range
        (min, max)     # vrange to specified values
        "data"         # vrange to min and max of data -/+ xy_pad
        "match"        # vrange to min and max of default ranges of each
        "clim"         # vrange to img.get_clim() -/+ xy_pad
    args:
        img        # image artist; only its color limits are read (for "clim")
        axx        # horizontal axis
        axy        # vertical axis
        data       # 2D numpy.ndarray
        xy_limits  # None or "auto" / (min, max) / "data" / "match" / "clim"
        xy_pad     # padding of the xy vertical range, as a fraction of the range
                   # (active for xy_limits="data" or "clim")
    returns:
        axx, axy
    """
    # axx vertical range
    if xy_limits is None or xy_limits == "auto":
        pass
    elif isinstance(xy_limits, tuple):
        # values specified
        axx.set_ylim(*xy_limits)
        axy.set_xlim(*xy_limits)
    elif xy_limits == "data":
        # edge plots range to match padded data range
        rng = data.max() - data.min()
        limits = (data.min() - xy_pad * rng,
                  data.max() + xy_pad * rng)
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    elif xy_limits == "match":
        # edge plots range to match each other
        # (union of the two current ranges, so neither plot is clipped)
        limits = (min(axx.get_ylim()[0], axy.get_xlim()[0]),
                  max(axx.get_ylim()[1], axy.get_xlim()[1]))
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    elif xy_limits == "clim":
        # edge plots range to match padded image clim
        clim = img.get_clim()
        rng = clim[1] - clim[0]
        limits = (clim[0] - xy_pad * rng,
                  clim[1] + xy_pad * rng)
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    else:
        raise ValueError(f"Invalid value for `xy_limits`={xy_limits}")
    return axx, axy | a375f92f2800a2dbd7b600c26dfc6e287325eb12 | 32,610 |
from typing import Iterable
from typing import Iterator
def flatten_iterator(*args: Iterable, depth=None) -> Iterator:
    """
    Iterates and flattens iterables recursively according to the specified depth.
    If depth=None (the default) it flattens recursively until it finds no iterable.
    Strings and bytes are treated as atoms, not as iterables.
    >>> type(flatten_iterator([1, 2, [3, 4, ['cinco']]]))
    <class 'generator'>
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]]))
    [1, 2, 3, 4, 'cinco']
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]], depth=1))
    [1, 2, [3, 4, ['cinco']]]
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]], depth=2))
    [1, 2, 3, 4, ['cinco']]
    """
    # current_depth tracks recursion depth across the nested generator calls;
    # it starts at -1 so the first (outermost) call counts as depth 0.
    current_depth = -1
    def flatten_iterator_(*args_: Iterable, depth_=None) -> Iterator:
        nonlocal current_depth
        if depth_ is not None:
            # entering one level of nesting (only tracked when a depth cap is set)
            current_depth += 1
        for arg_ in args_:
            if isinstance(arg_, Iterable) and not isinstance(arg_, (str, bytes)) and (depth_ is None or current_depth < depth_):
                yield from flatten_iterator_(*arg_, depth_=depth_)
                if depth_ is not None:
                    # recursion for this element is exhausted; undo its increment
                    current_depth -= 1
            else:
                # atom, or nesting deeper than the requested depth: emit as-is
                yield arg_
    return flatten_iterator_(*args, depth_=depth) | 2e10b23a7c17fb2a19691e10d8b4c290b50f2ca0 | 32,611 |
def oddNumbers(l, r):
    """Return a list of all odd integers in the closed interval [l, r].

    :param l: left endpoint (inclusive)
    :param r: right endpoint (inclusive)
    :return: list of odd numbers in [l, r] (empty when none exist)
    """
    start = l | 1                    # bump an even lower bound up to the next odd
    stop = r + 1 if r % 2 else r     # make the exclusive end include an odd r
    return list(range(start, stop, 2))
def num_groups_1(payload):
    """Split *payload* into consecutive 11-bit groups, most-significant first.

    :param payload: bytes object to slice into 11-bit integers
    :return: list of ints in [0, 2047]; trailing bits that do not fill a
             whole 11-bit group are discarded
    """
    total_bits = len(payload) * 8
    as_int = int.from_bytes(payload, 'big')
    group_count = total_bits // 11
    # Group k occupies bits [11k, 11k+11) counted from the MSB.
    return [
        (as_int >> (total_bits - 11 * (k + 1))) & 0x7FF
        for k in range(group_count)
    ]
def times(values):
    """
    Reads the stdout logs, calculates the various cpu times and creates a dictionary
    of idle time and the total time
    Parameters
    ----------
    values : list
        output of the command from the std out logs; expected to contain the
        ten /proc/stat-style fields (user, nice, system, idle, iowait, irq,
        softirq, steal, guest, guest_nice) -- the last two are ignored
    Returns
    -------
    tuple
        (total, idle) cpu time -- note the order: total first, then idle
    """
    user, nice, system, idle, io, irq, soft, steal, _, _ = values
    # iowait counts as idle time
    idle = idle + io
    non_idle = user + nice + system + irq + soft + steal
    total = idle + non_idle
    return total, idle | 74b7675a5854c757f3f3f2ddf53474b664e3d74b | 32,614 |
def _weight_with_fixed_dimension_order(arr, weight=None):
    """Apply weights to an array, preserving dimension order.

    ``arr`` is returned unchanged when ``weight`` is None.  Otherwise
    missing weights are replaced by 0 and the weighted product is
    transposed back into ``arr``'s original dimension order (the multiply
    may reorder dims via broadcasting).  NOTE(review): the ``fillna`` /
    ``transpose(*dims)`` API suggests xarray objects -- confirm at callers.
    """
    if weight is None:
        return arr
    return (weight.fillna(0) * arr).transpose(*arr.dims) | 77e0ac5c5e392ea9773c74e6bdb7e20549994a83 | 32,615 |
def cluster_profile_query(city):
    """Build the SQL query describing cluster centroids as 24-hour timeseries.

    Parameters
    ----------
    city : str
        City of interest, either `bordeaux` or `lyon`; used as the schema name.

    Returns
    -------
    str
        SQL query returning the most recent centroid profile per cluster.
    """
    if city not in ('bordeaux', 'lyon'):
        raise ValueError("City '{}' not supported.".format(city))
    hour_columns = ", ".join("h{:02d}".format(h) for h in range(24))
    query_template = (
        "WITH ranked_centroids AS ("
        "SELECT *, rank() OVER (ORDER BY stop DESC) AS rank "
        "FROM {schema}.centroid) "
        "SELECT cluster_id, "
        "{hours}, "
        "start, stop "
        "FROM ranked_centroids "
        "WHERE rank=1;"
    )
    return query_template.format(schema=city, hours=hour_columns)
import importlib
def import_error():
    """Attempt to import a nonexistent module and report the failure.

    :return: ``"module not found"`` when the import fails (the expected
             outcome); implicitly ``None`` if the module were importable.
    """
    outcome = None
    try:
        importlib.import_module('no_such_module')
    except ImportError:
        outcome = "module not found"
    return outcome
def format_duration(seconds: float) -> str:
    """
    Format a duration as a human-readable string.

    Args:
        seconds: The duration to format, in seconds.

    Returns:
        The duration with two decimal places and a " sec" suffix.
    """
    return "{:.2f} sec".format(seconds)
import requests
def browse(url):
    """Retrieve the server response contents of the given URL.

    Returns the response body as text.  The ``adultOff=no`` cookie is sent
    so books with adult content are served rather than filtered out.
    NOTE(review): no timeout is passed to requests.get, so this can hang
    indefinitely on an unresponsive server -- consider adding one.
    """
    # A cookie is required to allow books with adult content to be served.
    return requests.get(url, cookies={"adultOff": "no"}).text | 06b6d1195141dde662fd5252714e8d5facdc8c1d | 32,621 |
import torch
def apply_across_dim(function, dim=1, shared_keys=None, **tensors):
    """
    Apply a function repeatedly for each tensor slice through the given dimension.

    For example, with a tensor of shape [batch_size, X, seq_len] and dim=1, the
    results of function([:, 0, :]), function([:, 1, :]), ..., function([:, X-1, :])
    are concatenated back along dim=1.

    Args:
        function (callable): Function to apply; must accept the keyword
            arguments in ``tensors`` (sliced) and return a dict of tensors.
        dim (int): Dimension through which we'll apply function. (1 by default)
        shared_keys (set): Keys whose values are passed unsliced to every call
            and excluded from the concatenated output. (None by default)
        tensors (torch.Tensor): Keyword arguments of tensors to compute.
            Dimension should be >= `dim`. Non-tensor values are automatically
            treated as shared.

    Returns:
        Dict[str, torch.Tensor]: Dictionary of tensors keyed by the function's
        output keys, concatenated along ``dim``.
    """
    # Separate shared and non-shared tensors
    shared_arguments = {}
    repeat_targets = {}
    for key, tensor in tensors.items():
        if not isinstance(tensor, torch.Tensor) or (shared_keys and key in shared_keys):
            shared_arguments[key] = tensor
        else:
            repeat_targets[key] = tensor
    # Check whether the size of the given dimension is the same across sliced tensors.
    size = {key: tensor.shape[dim] for key, tensor in repeat_targets.items()}
    assert len(set(size.values())) == 1, 'Tensors does not have same size on dimension %s: We found %s' % (dim, size)
    # Since the sizes are the same, we will represent the size using the first entry.
    size = list(size.values())[0]
    # Dictionary for storing outputs
    output = {}
    for i in range(size):
        # Build kwargs for the function: slice each repeated tensor at index i.
        kwargs = {key: tensor.select(dim=dim, index=i).contiguous() for key, tensor in repeat_targets.items()}
        kwargs.update(shared_arguments)
        # Apply function on the slice and restore the dimension for concatenation.
        for key, tensor in function(**kwargs).items():
            # BUGFIX: guard against shared_keys=None (previously `key in shared_keys`
            # raised TypeError whenever the default None was used).
            if shared_keys and key in shared_keys:
                continue
            if key not in output:
                output[key] = []
            output[key].append(tensor.unsqueeze(dim=dim))
    # Check whether the outputs have the same size.
    assert all(len(t) == size for t in output.values())
    # Concatenate all outputs, and return.
    return {key: torch.cat(tensor, dim=dim).contiguous() for key, tensor in output.items()}
def bool_setter(value: bool):
    """Generic setter/validator for bool values.

    Passes through bools and None unchanged, and converts the exact
    (lowercase) strings "true"/"false" to their boolean equivalents.

    Args:
        value: The value to be validated.

    Returns:
        The validated bool (or None).

    Raises:
        ValueError: If the value is neither bool, None, "true" nor "false".
    """
    if isinstance(value, bool) or value is None:
        return value
    if value == "false":
        return False
    if value == "true":
        return True
    raise ValueError("Type should be bool") | eec686da23b4a95c0276b8e2b97975fe24bf7b91 | 32,624 |
def get_data():
    """
    Data taken from `here <https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/>`_.
    A pair of binary variables, `a` (actual) and `p` (predicted), are returned.
    :return: a, p
    """
    # 50 TN, 10 FP, 5 FN, 100 TP as (actual, predicted) pairs
    pairs = [(0, 0)] * 50 + [(0, 1)] * 10 + [(1, 0)] * 5 + [(1, 1)] * 100
    actual, predicted = (list(column) for column in zip(*pairs))
    return actual, predicted
def get_stream_to_id(storage_channel):
    """
    Newly registered channels carry a stream_to_id field; for legacy records
    stream_to_id is unset, so fall back to the primary key id.
    """
    return storage_channel.stream_to_id if storage_channel.stream_to_id else storage_channel.id | 9a773d32daebdba0caa48f933c7c506dce59a089 | 32,626 |
import os
def get_env(key, default=None):
    """
    Look up an environment variable.

    :param key: name of the environment variable
    :param default: value returned when the variable is unset (None means
                    "no default": a missing variable raises)
    :raises Exception: if the variable is unset and no default was given
    """
    value = os.environ.get(key)
    if value is not None:
        return value
    if default is not None:
        return default
    raise Exception(f"Environment variable '{key}' not set.") from None
def string_to_index(needle, columns):
    """Given a string, find which (1-based) column index it corresponds to.

    :param needle: The string to look for.
    :param columns: The list of columns to search in.
    :returns: The 1-based position of the first match.
    :raises ValueError: when the needle is not present in columns.
    """
    for position, candidate in enumerate(columns, start=1):
        if candidate == needle:
            return position
    raise ValueError("Value \"{0}\" not found in columns \"{1}\"".format(needle, columns))
def apply_swap(M,swapped):
    """
    Takes a (column) swap tuple produced by bareiss(M) and
    applies it to M.
    swapped[j] = k means that column k was moved to position j.
    Columns not mentioned in `swapped` keep their relative order and are
    appended after the swapped ones.  Returns a new matrix; M is not
    modified.  NOTE(review): the `__copy__`/`ncols`/slice API suggests a
    Sage matrix type -- confirm at callers.
    """
    if swapped is None:
        return None
    Mres = M.__copy__() # copy to get a matrix of the right dimension
    # place each swapped column at its target position
    for j in range(len(swapped)):
        #print "swapped: j", j, "becomes", swapped[j]
        Mres[:,j] = M[:,swapped[j]]
    # remaining columns follow, preserving their original order
    not_swapped = [i for i in range(M.ncols()) if i not in swapped]
    for j in range(len(not_swapped)):
        #print "not_swapped: j", j+len(swapped), "becomes", not_swapped[j]
        Mres[:,j+len(swapped)] = M[:,not_swapped[j]]
    return Mres | c3232e35213e824b718239127050e795da237965 | 32,631 |
import torch
def intersect1d(tensor1, tensor2):
    """Return the set intersection of two 1D tensors as a LongTensor.

    The result is placed on the GPU when ``tensor1`` lives there.  Element
    order in the output is unspecified (set-based).
    """
    common = set(tensor1.tolist()).intersection(tensor2.tolist())
    result = torch.LongTensor(list(common))
    if tensor1.is_cuda:
        result = result.cuda()
    return result
def is_valid(actions):
    """Check that each buy/sell action pair respects chronological order.

    An action is valid when it is the null action [0, 0] or when its buy
    index is strictly before its sell index.
    """
    return all(
        action == [0, 0] or action[0] < action[1]
        for action in actions
    )
def nested_sum(t):
    """Compute the total of all numbers in a list of lists.

    t: list of list of numbers
    returns: number (0 for an empty outer list)
    """
    return sum(sum(inner) for inner in t)
def sortLocations(locations):
    """ Sort the locations by ranking:
        1.  all on-axis points
        2.  all off-axis points which project onto on-axis points
            these would be involved in master to master interpolations
            necessary for patching. Projecting off-axis masters have
            at least one coordinate in common with an on-axis master.
        3.  non-projecting off-axis points, 'wild' off axis points
            These would be involved in projecting limits and need to be patched.

    Locations flagged as origin (l.isOrigin()) are skipped entirely.
    NOTE(review): elements are assumed to be mapping-like Location objects
    exposing isOrigin()/isOnAxis()/keys() -- confirm against the caller.
    Returns the triple (onAxis, offAxis_projecting, offAxis_wild).
    """
    onAxis = []
    onAxisValues = {}       # axis name -> list of on-axis coordinate values
    offAxis = []
    offAxis_projecting = []
    offAxis_wild = []
    # first get the on-axis points
    for l in locations:
        if l.isOrigin():
            continue
        if l.isOnAxis():
            onAxis.append(l)
            for axis in l.keys():
                if axis not in onAxisValues:
                    onAxisValues[axis] = []
                onAxisValues[axis].append(l[axis])
        else:
            offAxis.append(l)
    # an off-axis point "projects" when it shares at least one axis value
    # with some on-axis point
    for l in offAxis:
        ok = False
        for axis in l.keys():
            if axis not in onAxisValues:
                continue
            if l[axis] in onAxisValues[axis]:
                ok = True
        if ok:
            offAxis_projecting.append(l)
        else:
            offAxis_wild.append(l)
    return onAxis, offAxis_projecting, offAxis_wild | bee5635c7493ff265c3a59203ef5fdf32a35fe7c | 32,636 |
import functools
def cached(function):
    """Memoize *function* via an LRU cache keyed on (function, *args).

    Wraps functools.lru_cache so it can be applied at decoration time
    (works around lru_cache quirks in Python 3.7).
    """
    @functools.lru_cache(maxsize=30)
    def _memo(func, *args, **kwargs):
        return func(*args, **kwargs)

    @functools.wraps(function)
    def _wrapped(*args, **kwargs):
        return _memo(function, *args, **kwargs)

    return _wrapped
def application_state(app):
    """Return the consolidated state for application *app*.

    The *app* parameter must be a dict as returned by
    :meth:`~RavelloClient.get_application`.

    The consolidated state is the set of distinct states of the app's VMs:
    None when there are no VMs, the single state when all VMs agree, and a
    list of distinct states otherwise.
    """
    states = {vm['state'] for vm in app.get('deployment', {}).get('vms', [])}
    if not states:
        return None
    if len(states) == 1:
        return states.pop()
    return list(states)
from typing import IO
from typing import Any
from typing import List
import mmap
def _get_lines_from_fd(fd: IO[Any], nb: int = 10) -> List[str]:
    """
    Get the last log lines from a fileno with mmap
    :param fd: File descriptor on the log file
    :param nb: number of messages to fetch
    :returns: A list of message lines (the last *nb* messages)

    NOTE(review): mmap.PROT_READ is POSIX-only; this will not run on
    Windows.  The file must be non-empty or mmap raises ValueError.
    """
    with mmap.mmap(fd.fileno(), 0, prot=mmap.PROT_READ) as m:
        # start of messages begin with MI or MR, after a \n
        pos = m.rfind(b"\nM") + 1
        # number of message found so far
        count = 0
        # walk backwards one message start at a time until nb are covered
        # or the beginning of the file is reached (rfind -> -1, +1 -> 0)
        while pos != 0 and count < nb - 1:
            count += 1
            pos = m.rfind(b"\nM", 0, pos) + 1
        lines = m[pos:].decode(errors='replace').splitlines()
    return lines | a3a97b7ff8fcc8a0e9b564233ccbb11fa0ee7061 | 32,639 |
import numpy
def softmax(a):
    """Softmax activation function.

    The outputs are interpreted as probabilities: each lies in [0, 1]
    and they sum to one:

    .. math::
        g(a_f) = \\frac{\\exp(a_f)}{\\sum_F \\exp(a_F)}.

    The maximum component of :math:`a` is subtracted before
    exponentiation for numerical stability; this leaves the result
    mathematically unchanged.

    **Parameters**
        :a: array-like, shape = [F,]; activations

    **Returns**
        :y: array-like, shape = [F,]; outputs
    """
    shifted = a - a.max()
    exponentials = numpy.exp(shifted)
    return exponentials / exponentials.sum()
def bookkeep_reactant(mol):
    """Bookkeep bonds in the reactant.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance for reactants.

    Returns
    -------
    pair_to_bond_type : dict
        Mapping 2-tuples of atom indices (smaller index first) to bond
        type. 1, 2, 3, 1.5 are separately for single, double, triple and
        aromatic bond (RDKit's GetBondTypeAsDouble convention).
    """
    pair_to_bond_type = dict()
    for bond in mol.GetBonds():
        atom1, atom2 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        # canonicalize the pair so (i, j) and (j, i) map to one key
        atom1, atom2 = min(atom1, atom2), max(atom1, atom2)
        type_val = bond.GetBondTypeAsDouble()
        pair_to_bond_type[(atom1, atom2)] = type_val
    return pair_to_bond_type | e8ee50904596152299d140a58ac4496e98c771df | 32,642 |
def _make_yaml_key(s):
"""
Turn an environment variable into a yaml key
Keys in YAML files are generally lower case and use dashes instead of
underscores. This isn't a universal rule, though, so we'll have to
either change the keys to conform to this, or have some way of indicating
this from the environment.
"""
return s.lower().replace("_", "-") | 3ebbd12458f0c34cdfce50cea53ed94be464cdbb | 32,644 |
def get_backbones(nts):
    """Collect backbone pairs from DSSR nucleotide records.

    Args:
        nts: sequence of DSSR nucleotide dicts (must expose the keys
            'chain_name', 'nt_type' and 'summary').

    Returns:
        list of (5' base, 3' base) tuples for consecutive RNA residues on
        the same chain with no chain break at the 3' residue.
    """
    pairs = []
    for five_p, three_p in zip(nts, nts[1:]):
        # only link residues on the same chain
        if five_p['chain_name'] != three_p['chain_name']:
            continue
        # both residues must be RNA
        if three_p['nt_type'] != 'RNA' or five_p['nt_type'] != 'RNA':
            continue
        # skip annotated chain breaks
        if 'break' in three_p['summary']:
            continue
        pairs.append((five_p, three_p))
    return pairs
import hashlib
def get_file_hash(file_list):
    """
    Compute an MD5 hash for each file in a list.

    :param file_list: list of file paths; a single path (any non-list
        value) is also accepted and wrapped in a list
    :return: dictionary of {file: hex digest} items
    """
    # Accept a bare path as well as a list of paths.
    if not isinstance(file_list, list):
        file_list = [file_list]
    BLOCKSIZE = 65536
    file_dict = {}
    for file in file_list:
        hasher = hashlib.md5()
        # `with` guarantees the handle is closed even if reading fails;
        # iter()'s sentinel form reads fixed-size chunks until EOF (b'').
        with open(file, 'rb') as afile:
            for chunk in iter(lambda: afile.read(BLOCKSIZE), b''):
                hasher.update(chunk)
        file_dict[file] = hasher.hexdigest()
    return file_dict
def fib(n):
    """
    This is an example of decorated function. Decorators are included in the
    documentation as well. This is often useful when documenting web APIs,
    for example.

    Returns the n-th Fibonacci number; inputs below 2 are returned as-is.
    """
    if n < 2:
        return n
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
import numpy
def edit_distance_string(s1, s2, cmp_cost=1.):
    """
    Computes the edit distance between strings *s1* and *s2*.

    :param s1: first string
    :param s2: second string
    :param cmp_cost: substitution cost (insert/delete cost is fixed at 1)
    :return: dist, list of tuples of aligned characters

    Another version is implemented in module :epkg:`cpyquickhelper`.
    It uses C++ to make it around 25 times faster than the python
    implementation.
    """
    # Classic dynamic programming over an (n1 x n2) cost matrix, with a
    # parallel predecessor matrix so the alignment can be reconstructed.
    n1 = len(s1) + 1
    n2 = len(s2) + 1
    dist = numpy.full((n1, n2), n1 * n2, dtype=numpy.float64)
    # pred codes: 1 = deletion (move down), 2 = insertion (move right),
    # 3 = match/substitution (move diagonally), -1 = origin.
    pred = numpy.full(dist.shape, 0, dtype=numpy.int32)
    # first column: delete i characters of s1
    for i in range(0, n1):
        dist[i, 0] = i
        pred[i, 0] = 1
    # first row: insert j characters of s2
    for j in range(1, n2):
        dist[0, j] = j
        pred[0, j] = 2
    pred[0, 0] = -1
    for i in range(1, n1):
        for j in range(1, n2):
            c = dist[i, j]
            p = 0
            if dist[i - 1, j] + 1 < c:
                c = dist[i - 1, j] + 1
                p = 1
            if dist[i, j - 1] + 1 < c:
                c = dist[i, j - 1] + 1
                p = 2
            # diagonal step is free on a match, cmp_cost otherwise
            d = 0 if s1[i - 1] == s2[j - 1] else cmp_cost
            if dist[i - 1, j - 1] + d < c:
                c = dist[i - 1, j - 1] + d
                p = 3
            if p == 0:
                raise RuntimeError(  # pragma: no cover
                    "Unexpected value for p=%d at position=%r." % (p, (i, j)))
            dist[i, j] = c
            pred[i, j] = p
    d = dist[len(s1), len(s2)]
    # walk the predecessor matrix backwards to recover aligned index pairs
    equals = []
    i, j = len(s1), len(s2)
    p = pred[i, j]
    while p != -1:
        if p == 3:
            equals.append((i - 1, j - 1))
            i -= 1
            j -= 1
        elif p == 2:
            j -= 1
        elif p == 1:
            i -= 1
        else:
            raise RuntimeError(  # pragma: no cover
                "Unexpected value for p=%d at position=%r." % (p, (i, j)))
        p = pred[i, j]
    return d, list(reversed(equals)) | 8da1eba2f313cabd8839de0e0155ebeb7c17ea33 | 32,649 |
from functools import reduce
def get(dic, ks):
    """
    Walk into a (potentially multi-level) dictionary.

    :param dic: Potentially multi-level dictionary
    :param ks: Potentially `.`-separated keys
    :return: the value reached by following each key segment in turn
    """
    node = dic
    for segment in ks.split('.'):
        node = node[segment]
    return node
def Validate(func, value):
    """Return *value* if the predicate accepts it, else raise ValueError."""
    if not func(value):
        raise ValueError("%r: Invalid value %r" % (func, value))
    return value
def find_longest_paper(pubs):
    """
    Find the length of the longest paper in a year_dict, measured by how
    many tokens the paper contains.

    Parameters:
        pubs (list-like, required): The year_dict to be searched

    Returns:
        longest (int): The length of the longest paper (0 when empty)
    """
    return max(map(len, pubs), default=0)
def parent_counts(experiment_proto):
    """Return a map from all counts to counts from their input round.

    Args:
        experiment_proto: selection_pb2.Experiment describing the experiment.

    Returns:
        Dict[str, str] mapping SequencingReads names to the read name for positive
        results from the previous round. Reads without a parent count are omitted.
    """
    # First pass: for every round, record the positive-read name of the
    # round it takes its input from (None for rounds with no input).
    input_counts = {}
    for round_name, round_proto in experiment_proto.rounds.items():
        if round_proto.input:
            parent_round = experiment_proto.rounds[round_proto.input]
            input_counts[round_name] = parent_round.positive_reads.name
        else:
            input_counts[round_name] = None
    # Second pass: link each named read field (positive and negative) of a
    # round to that round's parent count, skipping unnamed reads and
    # rounds without a parent.
    dependencies = {}
    for round_name, round_proto in experiment_proto.rounds.items():
        for reads in [round_proto.positive_reads, round_proto.negative_reads]:
            field = reads.name
            if field:
                parent_count = input_counts[round_name]
                if parent_count:
                    dependencies[field] = parent_count
    return dependencies | 2fedab0eb54b75f12616e1c6ed232352a911643e | 32,653 |
def pattern_pos(element_number: int, position: int) -> int:
    """Return the pattern multiplier (0, 1, 0 or -1) for the given element
    number at the given position, cycling every four regions."""
    quartet = ((position + 1) // element_number) % 4
    return (0, 1, 0, -1)[quartet]
def linspace(start, end, number_of_points):
    """
    Generate a list of floats from start to end containing number_of_points
    elements — a clone of the NumPy function with the same name.

    :param start: starting point of list (must be strictly less than end).
    :param end: ending point of list.
    :param number_of_points: number of points in returned list (at least 2).
    :raises ValueError: on an empty/inverted interval or fewer than 2 points.
    """
    if start >= end:
        raise ValueError(
            'The starting value must be less than the ending value.')
    if number_of_points < 2:
        raise ValueError('The space must contain at least two points.')
    step = (end - start) / (number_of_points - 1)
    return [start + step * k for k in range(number_of_points)]
def get_extra_pages(app):
    """Collect theme-defined extra pages from the Sphinx builder context.

    Scans the builder's global context for keys prefixed with
    ``theme_extra_pages_`` and returns, for each, a
    (page_name, context, page_value) tuple, where page_name is the key
    with the prefix stripped.
    """
    result = []
    context = app.builder.globalcontext
    for context_key in context:
        if context_key.startswith('theme_extra_pages_'):
            # strip the prefix to recover the bare page name
            page_name = context_key.split('theme_extra_pages_')[-1]
            result.append(
                (page_name, context, context[context_key],)
            )
    return result | 50bd9b633612390d79bec3d2bce09756ddde9bbd | 32,656 |
def markup_classifier(m):
    """Takes a markup conditions object and classifies according to logic defined below.
    Should be customized for implementation.

    Returns one of "Fluid collection-positive", "Fluid collection-negated",
    "fluid collection-indication" or None.  NOTE(review): `m.conditions` is
    assumed to expose boolean flags (target, anatomy, negated, indication,
    definitive, historical, probable, pseudoanatomy) -- confirm at caller.
    """
    conditions = m.conditions
    markup_class = None
    # no target at all: leave unclassified
    if not conditions.target:
        pass
    #positive
    elif (conditions.anatomy and not conditions.negated and not conditions.indication)\
        or (conditions.anatomy and conditions.definitive):
        markup_class = "Fluid collection-positive"
    #negated
    elif conditions.negated and not conditions.definitive:
        markup_class = "Fluid collection-negated" #work on making this more generalizable
    #indication
    elif conditions.indication and not (conditions.negated or conditions.definitive
                                        or conditions.historical or conditions.probable):
        markup_class = "fluid collection-indication"
    #check for pseudoanatomy: pseudo-anatomy without real anatomy voids the class
    if conditions.pseudoanatomy and not conditions.anatomy:
        markup_class = None
    return markup_class | 8b524c4f61e788644fb90b5ee97f4a607c771a60 | 32,659 |
def get_op_list(arch):
    """
    Extract the list of operations from an architecture string.
    (code modified from project https://github.com/naszilla/naszilla)
    """
    # positions 0, 2, 5 and 9 hold separators/padding, not operations
    separator_positions = {0, 2, 5, 9}
    return [
        token.split('~')[0]
        for index, token in enumerate(arch.split('|'))
        if index not in separator_positions
    ]
def fahrenheit_to_rankine(temp):
    """
    Convert a temperature from Fahrenheit (ºF) to Rankine (R).
    """
    RANKINE_OFFSET = 459.67
    return temp + RANKINE_OFFSET
def split_endpoint_timestamp(file):
    """Split a file name into its endpoint and timestamp parts.

    Parameters
    ----------
    file : pathlib.Path
        Can be a dated file or a link (links are resolved first).

    Returns
    -------
    str, str
        endpoint name and "date_time" timestamp
    """
    stem = file.resolve().stem
    endpoint, date_part, time_part = stem.rsplit("_", 2)
    return endpoint, f"{date_part}_{time_part}"
def _event_QSpinBox(self):
    """
    Return the value-change signal for a QSpinBox.

    Accesses ``self.valueChanged`` (the Qt signal emitted whenever the
    spin box value changes) so generic code can connect to it uniformly.
    """
    return self.valueChanged | b9d4b9788b3770431858606b53992ef7694f82bb | 32,663 |
def attributes(value):
    """
    Return a list of the object's public attribute names
    (every ``dir()`` entry not starting with an underscore).
    Example: {{ variable|attributes }}
    """
    public = []
    for name in dir(value):
        if not name.startswith('_'):
            public.append(name)
    return public
def related_polygon_assembly(_polygons, _recognition_result):
    """
    Merge related polygons according to their recognized text.

    :param _polygons: the detected polygons
    :param _recognition_result: the recognition result for each polygon
    :return: the merged polygons and their corresponding recognition results

    NOTE(review): currently a stub -- the inputs are returned unchanged;
    the merging logic described above is not implemented yet.
    """
    return _polygons, _recognition_result | ec7cfc4da1f5355872a4f2986be4d9dfcef1ce56 | 32,666 |
def system_reduction_factor(delta_ss, delta_frot, delta_fshear, eta_ss, eta_frot, eta_fshear):
    """
    Calculate the system displacement reduction factor as the
    displacement-weighted average of the foundation and superstructure
    reduction factors.

    :param delta_ss: superstructure displacement
    :param delta_frot: displacement due to foundation rotation
    :param delta_fshear: displacement due to soil-foundation shear deformation
    :param eta_ss: superstructure displacement reduction factor
    :param eta_frot: foundation rotation displacement reduction factor
    :param eta_fshear: soil foundation shear deformation displacement reduction factor
    :return: weighted-average reduction factor
    """
    displacements = (delta_ss, delta_frot, delta_fshear)
    factors = (eta_ss, eta_frot, eta_fshear)
    total = delta_ss + delta_frot + delta_fshear
    weighted = sum(d * f for d, f in zip(displacements, factors))
    return weighted / total
def generate_order_by_clause(params):
    """Generate an ``order by`` clause string from the given column names.

    :param list params: A list of column names to sort the result by, e.g.::

        params = [
            'id', 'name', 'full_path', 'parent_id',
            'resource', 'status', 'project_id',
            'task_type', 'entity_type', 'percent_complete'
        ]

    Each name is translated to its SQL expression and joined into a single
    ``order by ...`` string; an empty list yields the empty string.
    A KeyError is raised for unknown column names.
    """
    column_dict = {
        'id': 'id',
        'parent_id': "parent_id",
        'name': "name",
        'path': "full_path",
        'full_path': "full_path",
        'entity_type': "entity_type",
        'task_type': "task_types.name",
        'project_id': 'project_id',
        'date_created': 'date_created',
        'date_updated': 'date_updated',
        'has_children': 'has_children',
        'link': 'link',
        'priority': 'priority',
        'depends_to': 'dep_info',
        'resource': "resource_info.resource_id",
        'responsible': 'responsible_id',
        'watcher': 'watcher_id',
        'bid_timing': 'bid_timing',
        'bid_unit': 'bid_unit',
        'schedule_timing': 'schedule_timing',
        'schedule_unit': 'schedule_unit',
        'schedule_model': 'schedule_model',
        'schedule_seconds': 'schedule_seconds',
        'total_logged_seconds': 'total_logged_seconds',
        'percent_complete': 'percent_complete',
        'start': 'start',
        'end': '"end"',
        'status': '"Statuses".code',
    }
    selected_columns = [column_dict[column_name] for column_name in params]
    if not selected_columns:
        return ''
    return 'order by %s' % ', '.join(selected_columns)
def remove_runkey(seq, runkeys):
    """Strip the first matching run key prefix from *seq*.

    :param seq: the sequence string to inspect
    :param runkeys: iterable of candidate key prefixes, tried in order
    :return: (matched key or '', remaining sequence)
    """
    for key in runkeys:
        if seq.startswith(key):
            return key, seq[len(key):]
    return '', seq
def rect2raster(r, h):
    """Convert iulib rectangles to raster coordinates.

    Raster coordinates are given as (row0, col0, row1, col1) with rows
    counted downward from image height *h*. Note that this is different
    from some other parts of Python, which transpose rows and columns.
    """
    top = h - r.y1 - 1
    bottom = h - r.y0 - 1
    return (top, r.x0, bottom, r.x1)
import importlib
def load(module, name):
    """Import *module* and instantiate the class *name* from it.

    The class is created with its no-argument constructor; the class is
    expected to derive from Model.
    """
    module_obj = importlib.import_module(module, __name__)
    cls = getattr(module_obj, name)
    return cls()
def draw_box(image, bbox, color, lw):
    """Draw an RGB(A) `color` bounding box of line width `lw` on an image
    array, mutating and returning it. `bbox` is (y1, x1, y2, x2)."""
    y1, x1, y2, x2 = bbox
    # horizontal edges (top and bottom)
    for row in (y1, y2):
        image[row:row + lw, x1:x2] = color
    # vertical edges (left and right)
    for col in (x1, x2):
        image[y1:y2, col:col + lw] = color
    return image
def get_entity_list_container_field(name):
    """Return the container field used in list responses.

    GET /active_computers -> {"items": [...]}
    GET /jobs -> {"jobs": [...]}

    Unknown entity names map to themselves.
    """
    overrides = {
        "active_computers": "items",
        "inventory_computers": "items",
        "rpc_tasks": "tasks",
        "rpc_jobs": "jobs",
    }
    return overrides.get(name, name)
def _parse_findings(findings, region):
"""
Returns relevant information from AWS Security Hub API response.
Args:
findings (list): AWS Security Hub response.
region (str): AWS region.
Returns:
List[dict]: List of compliance information dictionaries.
"""
new_findings = []
for finding in findings:
new_findings.append(
{
"Region": region,
"Title": finding["Title"],
"Description": finding["Description"],
"Severity": finding["Severity"],
"Compliance": finding["Compliance"]["Status"],
"Recommendation": finding["Remediation"]["Recommendation"]["Text"],
"Reference": finding["Remediation"]["Recommendation"]["Url"],
}
)
return new_findings | 276f189027e105586a884cf74c8abee9bddd93be | 32,677 |
def KW_Variance(predictions, y_true):
    """Kohavi-Wolpert variance (Kuncheva et al.'s modification).

    Expects *predictions* as a list of per-classifier prediction lists and
    *y_true* as the ground-truth list; returns the normalized diversity
    score sum_i c_i * (L - c_i) / (N * L^2), where c_i is the number of
    classifiers correct on sample i.
    """
    classifier_count = len(predictions)
    score = 0
    for idx, truth in enumerate(y_true):
        hits = sum(1 for clf in predictions if list(clf)[idx] == truth)
        score += hits * (classifier_count - hits)
    return score / float(len(y_true) * classifier_count ** 2)
def read_playlists(fname="playlists.txt"):
    """Read the playlists file and return its lines (newlines included)."""
    with open(fname) as handle:
        return list(handle)
def is_valid_filename(filename):
"""Check if a file has a valid filename (valid chars and max length gotten from
stack overflow and other sources, may be wrong)"""
# Added some accents, ö and stuff like that is still verboten
valid_chars = '-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789àèéíóòúüÀÈÉÍÒÓÚÜ'
if len(filename) > 260:
return False
for char in filename:
if char not in valid_chars:
return False
return True | 680d7719c08cc0159bb9d50787253d97129b8f42 | 32,682 |
def disjoint_union(*graphs):
    """Construct the disjoint union of a list of (vertices, edges) graphs.

    Vertex indices in each graph's edges are shifted by the number of
    vertices accumulated so far, so every input keeps its own vertices.
    """
    all_vertices = []
    all_edges = []
    for vertices, edges in graphs:
        offset = len(all_vertices)
        all_edges.extend((u + offset, v + offset) for (u, v) in edges)
        all_vertices.extend(vertices)
    return (all_vertices, all_edges)
def bool_str(b: bool) -> str:
    """Convert a boolean (truthiness) to the string '1' or '0'."""
    if b:
        return '1'
    return '0'
import os
def get_default_connection_details():
    """ Get connection details from environment vars or Thanatos defaults.

    :return: Returns a dictionary of connection details.
    :rtype: dict
    """
    env_defaults = {
        'host': ('MYSQL_HOST', '127.0.0.1'),
        'user': ('MYSQL_USER', 'vagrant'),
        'password': ('MYSQL_PASSWORD', 'vagrant'),
        'database': ('MYSQL_DB', 'thanatos'),
    }
    return {
        key: os.environ.get(variable, fallback)
        for key, (variable, fallback) in env_defaults.items()
    }
def obsmode_name(mode):
    """Return full display name(s) of the observing mode(s).

    :param mode: a mode key (str) or a list of mode keys
    :return: list of full names; unknown keys are passed through unchanged
    """
    if not isinstance(mode, list):
        mode = [mode]
    full_names = {'fiducial': 'Fiducial',
                  'binospec': 'Binospec',
                  'hectochelle': 'Hectochelle',
                  'desi': 'DESI-like',
                  'gaia': 'Gaia-like',
                  'exgal': 'Extragalactic'}
    # Unknown modes fall through unchanged, same as the key-by-key lookup.
    return [full_names.get(m, m) for m in mode]
def no_of_misplaced_tiles(state):
    """
    Return the number of misplaced tiles in the given state.

    state: a nested list with the same (ragged) shape as the goal state
           ``[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]``
    """
    h1 = 0
    goal_state = [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
    # Compare cell by cell; each mismatch counts as one misplaced tile.
    for y in range(len(goal_state)):
        for x in range(len(goal_state[y])):
            if state[y][x] != goal_state[y][x]:
                h1 += 1
    return h1
import re
def parse_tweet_text(tweet_text):
    """
    Input: tweet_text: a string with the text of a single tweet
           or a concatenation of tweets
    Output: lists of tokens in the text:
            words (many emoticons are recognized as words)
            hashtags
            users mentioned
            urls
    Usage: words, hashes, users, urls = parse_tweet_text(tweet_text)
    """
    content = tweet_text
    # collect and remove URLs
    urls = re.findall(r"\b((?:https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|$!:,.;]*[A-Z0-9+&@#/%=~_|$])", content, re.IGNORECASE)
    content = re.sub(r"\b((?:https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|$!:,.;]*[A-Z0-9+&@#/%=~_|$])", "", content, 0, re.IGNORECASE)
    content = content.lower()
    # collect and remove users mentioned
    users = re.findall(r"@(\w+)", content)
    content = re.sub(r"@(\w+)", "", content, 0)
    # collect and remove hashtags
    hashes = re.findall(r"#(\w+)", content)
    content = re.sub(r"#(\w+)", "", content, 0)
    # strip out extra whitespace in the remaining text
    content = re.sub(r"\s{2,}", " ", content)
    # strip out singleton punctuation
    raw_words = content.split()
    words = []
    # Hoisted out of the loop: this pattern was recompiled once per word.
    # It maps any character outside the ranges below (e.g. astral-plane
    # code points) to U+FFFD (the replacement character).
    re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
    for word in raw_words:
        if word in ['.',':','!',',',';',"-","-","?",'\xe2\x80\xa6',"!","|",'"','~','..','/']: continue
        word = re_pattern.sub(u'\uFFFD', word)
        # remove trailing commas, periods, question marks, colons, exclamation marks
        word = re.sub(r"(.*)[,\.,\?,:,!]$", r"\1", word, 0, re.MULTILINE)
        words.append(word)
    return (words, hashes, users, urls)
def get_x_y(receiver):
    """
    (receiver: Receiver) -> (Column, Column)
    Return the x column and y column as a tuple.
    Assumes ``receiver.selection[1]`` holds at least two columns and
    takes the first two.
    """
    selection = receiver.selection
    return selection[1][0], selection[1][1]
def getColumnText(list, index, col):
    """Return the text of column *col* in row *index* of the list control.

    NOTE(review): the first parameter shadows the ``list`` builtin; the
    name is kept for backward compatibility with existing callers.
    (Original docstring said "Sets the text" — it actually gets it.)
    """
    item = list.GetItem(index, col)
    return item.GetText()
def poly(x, a):
    """
    Constant function as model for the background: returns ``a`` for
    every element of ``x``.
    """
    return [a for _ in x]
def commandsFromLine(line):
    """Extract uno command names from lines like `` 'Command1', 'Command2',``

    :param line: source line containing single-quoted command names
    :return: list of the quoted names, empty strings skipped
    """
    commands = []
    inCommand = False
    command = ''
    for c in line:
        if c == "'":
            # Quote toggles between "inside a name" and "between names".
            inCommand = not inCommand
            # command ended, collect it
            if not inCommand and command != '':
                commands += [command]
                command = ''
        elif inCommand:
            command += c
    return commands
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_cisco_umbrella_inv package"""
    reload_params = {"package": u"fn_cisco_umbrella_inv",
                    "incident_fields": [],
                    "action_fields": [],
                    "function_params": [u"artifact_type", u"incident_id", u"umbinv_dns_type", u"umbinv_domain", u"umbinv_domains", u"umbinv_hash", u"umbinv_include_category", u"umbinv_ipaddr", u"umbinv_limit", u"umbinv_match", u"umbinv_offset", u"umbinv_regex", u"umbinv_resource", u"umbinv_sample_endpoint", u"umbinv_showlabels", u"umbinv_sortby", u"umbinv_start_epoch", u"umbinv_start_relative", u"umbinv_status_endpoint", u"umbinv_stop_epoch", u"umbinv_stop_relative"],
                    "datatables": [u"umbinv_as_for_an_ip_or_asn", u"umbinv_categories_for_a_domain", u"umbinv_category_identifiers", u"umbinv_classifiers_for_a_domain", u"umbinv_dns_rr_history_domain", u"umbinv_dns_rr_history_ip", u"umbinv_domain_co_occurrences", u"umbinv_domain_security_info", u"umbinv_domain_volume", u"umbinv_domain_whois_info_domain", u"umbinv_latest_malicious_domains_for_an_ip", u"umbinv_pattern_search_start_epoch", u"umbinv_pattern_search_start_relative", u"umbinv_related_domains_for_a_domain", u"umbinv_thread_grid_sample_info_for_a_hash_basic", u"umbinv_thread_grid_samples_for_a_resource", u"umbinv_timeline_for_a_resource"],
                    "message_destinations": [u"umbrella_investigate"],
                    "functions": [u"umbrella_classifiers", u"umbrella_dns_rr_hist", u"umbrella_domain_co_occurrences", u"umbrella_domain_related_domains", u"umbrella_domain_security_info", u"umbrella_domain_status_and_category", u"umbrella_domain_volume", u"umbrella_domain_whois_info", u"umbrella_ip_as_info", u"umbrella_ip_latest_malicious_domains", u"umbrella_pattern_search", u"umbrella_threat_grid_sample", u"umbrella_threat_grid_samples", u"umbrella_timeline"],
                    "phases": [],
                    "automatic_tasks": [],
                    "scripts": [],
                    "workflows": [u"wf_umbrella_classifiers", u"wf_umbrella_dns_rr_hist_domain", u"wf_umbrella_dns_rr_hist_ip", u"wf_umbrella_domain_co_occurrences", u"wf_umbrella_domain_related_domains", u"wf_umbrella_domain_security_info", u"wf_umbrella_domain_status_and_category_cats", u"wf_umbrella_domain_status_and_category_dom", u"wf_umbrella_domain_volume", u"wf_umbrella_domain_whois_info", u"wf_umbrella_ip_as_info", u"wf_umbrella_ip_latest_malicious_domains", u"wf_umbrella_pattern_search_epoch", u"wf_umbrella_pattern_search_relative", u"wf_umbrella_threat_grid_sample", u"wf_umbrella_threat_grid_samples", u"wf_umbrella_timeline"],
                    "actions": [u"Example: AS Information for an ip address or ASN", u"Example: Categories for a domain", u"Example: Classifiers for a domain", u"Example: Co-occurences for a domain", u"Example: DNS RR history for a domain", u"Example: DNS RR history for an ip address", u"Example: Domain volume", u"Example: Domain WHOIS information for a domain", u"Example: Get list of category identifiers", u"Example: Latest Malicious Domains for an ip address", u"Example: Pattern search start epoch", u"Example: Pattern search start relative", u"Example: Related Domains for a Domain", u"Example: Security information for a domain", u"Example: ThreadGrid sample information for a hash", u"Example: ThreadGrid samples for a resource", u"Example: Timeline for a resource"],
                    "incident_artifact_types": []
                    }
    return reload_params
import numpy as np
def user_mod(value, modulo):
    """
    Modulo function that works for both positive and negative "value".

    Uses ``abs(modulo)``, so the result is always in ``[0, |modulo|)``.
    """
    return value - np.abs(modulo) * np.floor(value / np.abs(modulo))
from typing import Optional
def normalize_dewey(class_mark: str) -> Optional[str]:
    """
    Normalizes Dewey classification to be used in call numbers
    Args:
        class_mark: Dewey classification
    Returns:
        normalized class_mark, or None if the input is not a string or
        does not parse as a Dewey number
    """
    if isinstance(class_mark, str):
        # Strip formatting noise commonly present in catalog records.
        class_mark = (
            class_mark.replace("/", "")
            .replace("j", "")
            .replace("C", "")
            .replace("[B]", "")
            .replace("'", "")
            .strip()
        )
        try:
            # test if has correct format
            float(class_mark)
            # Strip trailing zeros.  NOTE(review): this also shortens
            # integer marks, e.g. "600" -> "6" — confirm that is intended.
            while class_mark[-1] == "0":
                class_mark = class_mark[:-1]
            return class_mark
        except ValueError:
            return None
    else:
        return None
def _repeated_effect_func(*funcs):
"""
Return an (impure) function which does different things based on the
number of times it's been called.
"""
counter = [0]
def func():
count = counter[0]
counter[0] += 1
return funcs[count]()
return func | 7f8ee10c1afbae9874506514a131289cff48ea65 | 32,697 |
from functools import reduce
def count(l):
    """Count the number of elements in an iterable. (consumes the iterator)

    The explicit initial value 0 is required: without it, reduce seeds
    the accumulator with the *first element* (giving element+len-1
    instead of len) and raises TypeError on an empty iterable.
    """
    return reduce(lambda acc, _: acc + 1, l, 0)
def check_commandline_inputs(n, limit):
    """See if the n and limit passed from the command line were valid.

    :param n: candidate n; must parse as int and be one of the valid ns
    :param limit: candidate limit; must parse as int
    :return: ``(n, limit)`` converted to ints
    :raises ValueError: on any invalid input
    """
    try:
        n = int(n)
    except (TypeError, ValueError):  # narrowed from bare except, which also hid e.g. KeyboardInterrupt
        raise ValueError("n wasn't a number")
    valid_ns = [5, 7, 11, 17, 23, 29, 37, 47, 59, 71, 83, 97, 113, 131, 149,
                167, 191, 223, 257, 293, 331, 373, 419, 467, 521]
    if n not in valid_ns:
        raise ValueError("n must be one of: {0}".format(valid_ns))
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        raise ValueError("Limit wasn't a number")
    return n, limit
def merge(df, merge_cfg, index_level="parent"):
    """
    Change certain names in an index_level and merge
    Parameters
    ----------
    df : DataFrame-like
        Dataframe input to change some index levels
    merge_cfg : dict
        Dictionary of new labels with values corresponding to a list of
        index values to change into the new label
    index_level : str (default, parent)
        Index to change
    Returns
    -------
    DataFrame grouped back on the original index names, with the rows
    whose relabelled index values now coincide summed together.
    """
    index_names = df.index.names
    tdf = df.reset_index()
    # Relabel: every index value listed under `label` becomes `label` itself.
    for label, match_labels in merge_cfg.items():
        tdf.loc[tdf[index_level].isin(match_labels), index_level] = label
    return tdf.groupby(index_names).sum()
def insert_cnpj(num):
    """
    Cast a string of digits to the formatted 00.000.000/0001-00 CNPJ standard.

    :param num: 14-digit string
    :return: formatted CNPJ string
    """
    return f"{num[:2]}.{num[2:5]}.{num[5:8]}/{num[8:12]}-{num[12:]}"
import os
def directory_is_writable(path):
    """
    Returns True if the specified directory exists and is writable
    by the current user.
    """
    return os.path.isdir(path) and os.access(path, os.W_OK)
def _collect_paths(headerspace, ruleset):
""" Find the path packets a subset of packets will hit in a pipeline
headerspace: A BooleanShim
ruleset: Must be ordered and in single table form
return: A list of tuples (path, BooleanShim)
"""
collected = []
for rule in ruleset:
if headerspace.matches(rule.match.get_wildcard()):
collected.append((rule.path, headerspace.intersection(rule.match.get_wildcard())))
return collected | 691895526e9fa140506735f3990a4b490c28d819 | 32,705 |
def get_empty_action_space(num_actions):
    """
    Returns an action space with nothing selected (a list of
    ``num_actions`` zeros).
    """
    return [0] * num_actions
from typing import get_origin
from typing import Sequence
def _is_collection_type(o) -> bool:
"""
Check whether the provided type/annotation is one which can hold elements. Necessarily since the minor versions
of Python 3 have evolving ways of comparing type annotations.
:param o: An annotation or type reference
:return: Whether it represents a type which can hold elements
"""
try:
# Py3.9+
cls = get_origin(o) or o
return issubclass(cls, Sequence)
except ImportError:
pass
# extract the base type if 'o' is an annotation
cls = o if type(o) == type else o.__orig_bases__[0]
return issubclass(cls, Sequence) | 386400e5a7e6ea5690bc080e31507e9de57fb193 | 32,709 |
def doc2vector(model, samples):
    """Infer vectors for samples
    Args:
        model: The instance to use to infer vectors as :class:`gensim.models.Doc2Vec`.
        samples: The samples as :class:`list`.
    Returns:
        The :class:`list` of inferred vectors, one per sample, in order.
    """
    return [model.infer_vector(sample) for sample in samples]
def createCustomClass(className, superClass, attributeDict):
    """
    Dynamically create and return a new class via the three-argument
    ``type()`` call.

    :param className: name of the new class
    :param superClass: tuple of base classes
    :param attributeDict: mapping of attribute/method names to values
    :return: the newly created class object
    """
    return type(className, superClass, attributeDict)
def child_structure_dfs(sampler, seen=None):
    """Return the structure of a composed sampler using a depth-first search on its
    children.
    Args:
        sampler (:obj:`.Sampler`):
            :class:`.Structured` or composed sampler with at least
            one structured child.
        seen (set, optional, default=False):
            IDs of already checked child samplers.
    Returns:
        :class:`~collections.namedtuple`: A named tuple of the form
        `Structure(nodelist, edgelist, adjacency)`, where the 3-tuple values
        are the :attr:`.Structured.nodelist`, :attr:`.Structured.edgelist`
        and :attr:`.Structured.adjacency` attributes of the first structured
        sampler found.
    Raises:
        ValueError: If no structured sampler is found.
    Examples:
        >>> sampler = dimod.TrackingComposite(
        ...     dimod.StructureComposite(
        ...         dimod.ExactSolver(), [0, 1], [(0, 1)]))
        >>> print(dimod.child_structure_dfs(sampler).nodelist)
        [0, 1]
    """
    seen = set() if seen is None else seen
    if sampler not in seen:
        try:
            return sampler.structure
        except AttributeError:
            # hasattr just tries to access anyway...
            pass
    seen.add(sampler)
    for child in getattr(sampler, 'children', ()):  # getattr handles samplers
        if child in seen:
            continue
        try:
            return child_structure_dfs(child, seen=seen)
        except ValueError:
            # tree has no child samplers
            pass
    raise ValueError("no structured sampler found")
import itertools
def sorted_classes_from_index_dict(idx_dct):
    """ Obtain classes from index dict, sorted by class index.
    :param idx_dct: A dictionary mapping atom keys to class indices.
    :type idx_dct: dict
    :returns: A tuple of tuples of keys for each class, sorted by class
        index.
    :rtype: tuple[tuple[int]]
    """
    # Sort keys first so members of the same class stay in key order
    # (Python's sort is stable), then sort by class index for grouping.
    keys = sorted(idx_dct.keys())
    clas = sorted(keys, key=idx_dct.__getitem__)
    cla_dct = tuple(
        tuple(c) for _, c in itertools.groupby(clas, key=idx_dct.__getitem__))
    return cla_dct
def _get_option(options: dict, opt: str):
"""Dictionary look-up with flonb specific error message"""
if opt in options:
return options[opt]
raise ValueError(f"Missing option '{opt}'.") | ab048a65c3e92547085e9ce31c03e8fbd3b60558 | 32,716 |
import pandas
def extract_data(extract):
    """Extract necessary info about each property and store it in a DataFrame.

    :param extract: parsed realtor.ca API response containing a "Results" list
    :return: DataFrame with one row per result, or None if extraction fails
    """
    def field(getter):
        # Fetch one nested field; any missing/mistyped key yields the default.
        try:
            return getter()
        except Exception:
            return 'Not given'

    try:
        rows = []
        for result in extract["Results"]:
            rows.append({
                'Bedroom': field(lambda: result['Building']['Bedrooms']),
                'Bathroom': field(lambda: result['Building']['BathroomTotal']),
                'Description': field(lambda: result['PublicRemarks']),
                'Rent': field(lambda: result['Property']['LeaseRent']),
                'Address': field(lambda: result['Property']['Address']['AddressText']),
                'Link': field(lambda: 'https://www.realtor.ca' + result['RelativeURLEn']),
            })
        return pandas.DataFrame(rows)
    except Exception:
        print('Could not extract data about property.')
        return None
def scale_to_bounds(x, lower_bound, upper_bound):
    """
    Scale the input data so that it lies in between the lower and upper bounds.
    Args:
        :attr:`x` (Tensor `n` or `b x n`):
            the input
        :attr:`lower_bound` (float)
        :attr:`upper_bound` (float)
    Returns:
        :obj:`torch.Tensor`
    """
    # Scale features so they fit inside grid bounds; the 0.95 factor keeps
    # a small margin away from the exact bounds.
    min_val = x.min()
    max_val = x.max()
    diff = max_val - min_val
    x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound
    return x
def get_ip_address(event):
    """
    Retrieves the client IP address from an event.

    Precedence: ``Client-Ip`` header, then the first entry of
    ``X-Forwarded-For``, then the request-context source IP; empty
    string when none are present.

    :param event: event
    :return: client IP address
    """
    if "headers" in event:
        if "Client-Ip" in event["headers"]:
            return event["headers"]["Client-Ip"]
        if "X-Forwarded-For" in event["headers"]:
            # X-Forwarded-For is a comma-separated chain; first hop is the client.
            return event["headers"]["X-Forwarded-For"].split(",")[0]
    if (
        "requestContext" in event
        and "identity" in event["requestContext"]
        and "sourceIp" in event["requestContext"]["identity"]
    ):
        return event["requestContext"]["identity"]["sourceIp"]
    return ""
def remove_comment_wrappers_from_html(html_string):
    """This function removes the first and last HTML comments (``<!-- ... -->``,
    including their content) from an HTML string.
    .. versionadded:: 2.5.1
    :param html_string: The HTML string from which to remove comments
    :type html_string: str
    :returns: The HTML string with the first and last comments removed;
        returned unchanged when no complete comment is present
    """
    start_comment_position = html_string.find("<!--")
    end_comment_position = html_string.find("-->")
    # Bug fix: find() returns -1 when absent; slicing with -1 used to
    # corrupt comment-free strings. Return the input untouched instead.
    if start_comment_position == -1 or end_comment_position == -1:
        return html_string
    last_start_comment_position = html_string.rfind("<!--")
    last_end_comment_position = html_string.rfind("-->")
    return html_string.replace(html_string[start_comment_position:end_comment_position + 3], "").replace(
        html_string[last_start_comment_position:last_end_comment_position + 3], "")
def partition(condition, iterable, output_class=tuple):
    """
    split an iterable into two according to a function evaluating to either
    true or false on each element
    :param condition: boolean function
    :param iterable: iterable to split
    :param output_class: type of the returned iterables
    :return: two iterables: (elements where condition is true,
        elements where condition is false)
    """
    true = []
    false = []
    for item in iterable:
        if condition(item):
            true.append(item)
        else:
            false.append(item)
    return output_class(true), output_class(false)
def parse_metadata(metadata_field):
    """Clean the metadata field: convert it to str, mapping NaN to ''.

    Note: any value whose string form is exactly "nan" (including the
    literal string) is treated as missing.
    """
    str_field = str(metadata_field)
    if str_field == "nan":
        return ""
    return str_field
def SetColor(x):
    """
    coloring scatter plots based on Network quality:
    >20 -> "green"; (15, 20] -> "yellow"; [10, 15] -> "red"; <10 -> "red".

    NOTE(review): values matching no branch (e.g. NaN) fall through and
    return None — confirm callers tolerate that.
    """
    if x > 20:
        return "green"
    elif x > 15 and x <= 20:
        return "yellow"
    elif x >= 10 and x <= 15:
        return "red"
    elif x < 10:
        return "red"
def mscale(matrix, d):
    """Return *matrix* scaled by scalar *d*.

    Note: scales the matrix **in place** and returns the same object.
    """
    for i in range(len(matrix)):
        for j in range(len(matrix[0])):
            matrix[i][j] *= d
    return matrix
def disp_mic_npt(pos1, pos2, cell1, cell2):
    """Minimum-image-convention displacement when cell dimensions change.

    Positions are first converted to fractional coordinates of their own
    cells; the fractional displacement is then wrapped into the minimum
    image per axis (using cell2's dimensions).
    """
    disp = pos2/cell2 - pos1/cell1
    for i in range(3):
        disp[i] -= round(disp[i]/cell2[i])
    return disp
def get_file_paths(file_prefix, num_files):
    """
    Generates the json file paths of the format <file_prefix>idx.json
    where idx is an integer between 0 and num_files-1
    :param file_prefix: The first part of the file path
        that all files to be averaged have in common
    :param num_files: The number of files to be averaged
    :return: A list of files to be averaged
    """
    return [f"{file_prefix}{i}.json" for i in range(num_files)]
def is_foul_on_opponent(event_list, team):
    """Return whether the first two events record a foul on an opponent.

    Requires, within the first two events: a foul (type_id 4) with
    outcome 0 by the *other* team, AND a foul with outcome 1 by *team*.
    """
    is_foul = False
    is_foul_pt2 = False
    for e in event_list[:2]:
        if e.type_id == 4 and e.outcome == 0 and e.team != team:
            is_foul = True
        elif e.type_id == 4 and e.outcome == 1 and e.team == team:
            is_foul_pt2 = True
    return is_foul and is_foul_pt2
def get_appliance_flow_bandwidth_stats(
    self,
    ne_id: str,
    flow_id: int,
    flow_seq_num: int,
) -> list:
    """Get the so far accumulated bandwidth stats about the flow.

    .. list-table::
        :header-rows: 1
        * - Swagger Section
          - Method
          - Endpoint
        * - flow
          - GET
          - /flow/flowBandwidthStats/{neId}/q

    :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
    :type ne_id: str
    :param flow_id: Flow ID
    :type flow_id: int
    :param flow_seq_num: Flow sequence number
    :type flow_seq_num: int
    :return: Returns list of dictionaries for so far accumulated
        bandwidth stats about the flow
    :rtype: list[dict]
    """
    return self._get(
        "/flow/flowBandwidthStats/{}/q?id={}&seq={}".format(
            ne_id, flow_id, flow_seq_num
        )
    )
def min_scalar_prod(x, y):
    """Permute vector to minimize scalar product
    :param x:
    :param y: x, y are vectors of same size
    :returns: min sum x[i] * y[sigma[i]] over all permutations sigma
    :complexity: O(n log n)
    """
    # Pair the smallest of x with the largest of y (rearrangement inequality).
    x1 = sorted(x)  # make copies to preserve
    y1 = sorted(y)  # the input arguments
    return sum(x1[i] * y1[-i - 1] for i in range(len(x1)))
import os
def get_files(file_dir):
    """
    Create list of training set geotif file names.
    Create list of all the geotif files that will be sampled to make
    the training set.
    Parameters
    ----------
    file_dir : `str`
        Path to the directory holding all of the geotifs to be sampled
    Returns
    -------
    f_names : `list`
        List of the individual files contained in the directory with their
        full pathing
    """
    # check if file_dir provided with or without a forward slash as the
    # final value -- if not there then append one
    if file_dir[-1] != '/':
        file_dir = file_dir + '/'
    return [file_dir + entry for entry in os.listdir(file_dir)]
def calc_thrust_power(block_count):
    """ Calculate the power in 'e/sec' consumed when using a ships thrusters
    Given the number of Thruster Modules, this function will return the power
    per second (e/sec) consumed when thrusters are active.
    Args:
        block_count: The number of Thruster Modules (block id 25)
    Returns:
        A float for 'e/sec' usage of active thrusters
    """
    return block_count / 0.03
def barycentric_to_cartesian(bary, vertices):
    """
    Compute the Cartesian coordinates of a point with given barycentric coordinates.
    :param bary: The barycentric coordinates.
    :param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the
        space)).
    :returns: The Cartesian coordinates vector (the bary-weighted sum of the vertices).
    :rtype: n-dimensional vector
    """
    return vertices[0] * bary[0] + vertices[1] * bary[1] + vertices[2] * bary[2]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.