content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import hashlib
def get_hashfunc_by_name(name, data):
    """
    Build a hash object for the named algorithm, primed with *data*.

    Can be used directly or with functools.partial, for example:
    >>> hashfunc = functools.partial(get_hashfunc_by_name, 'sha256')
    >>> hashfunc(b'sir robin').digest()

    Args:
        name (str): The string name of the desired algorithm
        data (buffer): The buffer to hash
    Returns:
        callable: The hash object for the requested algorithm,
        already updated with the data to be hashed
    """
    # hashlib.new accepts initial data directly, equivalent to new + update.
    return hashlib.new(name, data)
|
f26c364dc6ba260a8b95fa317f4e6fcdf7af2bd3
| 75,964
|
import torch
def one_hot(class_n, x):
    """
    Convert a scalar class index into a one-hot vector.

    :param class_n: number of classes (output vector length)
    :param x: the scalar class index
    :return: one-hot encoded tensor
    """
    identity = torch.eye(class_n)
    return identity[x]
|
29ae8794f785a2983cefc7a94cc7a3732a7a746a
| 75,965
|
def remove_campaigns(api, assessment_id):
    """Remove every campaign whose name starts with the assessment id.

    :param api: client exposing ``campaigns.get()`` and ``campaigns.delete(id)``
    :param assessment_id: name prefix identifying the assessment's campaigns
    :return: True once all matching campaigns were deleted
    """
    for campaign in api.campaigns.get():
        if campaign.name.startswith(assessment_id):
            api.campaigns.delete(campaign.id)
    return True
|
1b7b0f99798aa7aad115b15aa025ad596f212751
| 75,969
|
def replace(_, result, original, substitute):
    """Replace substrings within content.

    :param _: unused placeholder kept for the caller's signature
    :param result: the text to operate on
    :param original: substring to search for
    :param substitute: replacement substring
    """
    updated = result.replace(original, substitute)
    return updated
|
e78cc7c00aa6620ef5ed9bb0e469f9ecdc0524f0
| 75,970
|
def stacks_to_pyramid_seq(stacks):
    """Convert a list of stacked layers into a per-frame sequence of
    Laplacian pyramids: ``stacks[layer][frame]`` -> ``result[frame][layer]``."""
    nr_frames = len(stacks[0])
    return [[stack[frame_idx] for stack in stacks] for frame_idx in range(nr_frames)]
|
993ef8603e26e82425a87fc7e9b0487d9c53e343
| 75,974
|
def set_request_params(request):
    """Extract the expected query parameters from an HTTP request.

    Arg:
        request (Request): Http request information.
    Return:
        dict: Mapping of parameter name to its query-string value
        (None when the parameter is absent).
    """
    wanted = ("sys_id", "sys_master_id", "target_name")
    return {name: request.args.get(name) for name in wanted}
|
8fa003c184b9705eb084b2afecc5f60bae3aa77a
| 75,977
|
def parse_vector_line(line):
    """
    Takes a line from the .dat file formated according to Bodur style and creates a list of its values.
    E.g., "[10,10,10]" -> [10, 10, 10]

    :param line: string - the line with vector values, e.g., "[10,10,10]" or
        "[[10,10,10]," if it is the first vector of a matrix.
    :return: list with vector elements (ints where the value is integral,
        floats otherwise)
    """
    old_line_size = len(line)
    line = line.strip()  # sometimes we have trailing spaces
    if (old_line_size != len(line)):
        print(f"WARNING: Spaces at the beginning or at the end of the line are found in the .dat file.")
    line_start = line[:2]
    line_end = line[-2:]
    if line_start == "[[" and line_end == "]]":  # matrix with a single row, e.g., [[10,10,10]]
        clean_line = line[2:-2]
    elif line_start == "[[":  # first vector of matrix, e.g., [[10,10,10],
        clean_line = line[2:-2]
    elif line_end == "],":  # vector of matrix, e.g., [10,10,10],
        clean_line = line[1:-2]
    elif line_end == "]]":  # last vector of matrix, e.g., [10,10,10]]
        clean_line = line[1:-2]
    else:  # plain vector, e.g., [10,10,10]
        assert (line[0] == "[" and line[-1] == "]")
        clean_line = line[1:-1]
    vector_numerical = []
    for element in clean_line.split(","):
        element_numerical = float(element)
        # Keep integral values as int. BUG FIX: the original called
        # int(element) on the raw string, which raises ValueError for
        # inputs like "10.0"; convert the already-parsed float instead.
        if element_numerical.is_integer():
            element_numerical = int(element_numerical)
        vector_numerical.append(element_numerical)
    return vector_numerical
|
908284d308db1ebf5a027874b57ae4f1ed1b1fc1
| 75,979
|
import math
def angle_closeness(a, b, error):
    """Return True when the two angles ``a`` and ``b`` (radians, in [-pi, pi])
    differ by less than ``error`` radians, accounting for wrap-around at +/-pi.

    # Simple angles.
    >>> angle_closeness(1, 1.1, .2)
    True
    >>> angle_closeness(1, 1.2, .1)
    False

    # Negative angles.
    >>> angle_closeness(-1, -1.1, .2)
    True

    # Zero crossings.
    >>> angle_closeness(-.1, .1, .3)
    True
    >>> angle_closeness(.1, -.1, .1)
    False

    # Angles near pi wrap around.
    >>> angle_closeness(-3.1, 3.1, .2)
    True
    >>> angle_closeness(3.1, -3.1, .05)
    False
    """
    # Direct comparison first.
    if abs(a - b) < error:
        return True
    # Otherwise shift one negative operand up by a full turn so that angles
    # on opposite sides of +/-pi become numerically adjacent.
    if a < 0:
        a += 2 * math.pi
    elif b < 0:
        b += 2 * math.pi
    return abs(a - b) < error
|
12cca6d2edfd4756ac1d1d3515ad79aaef6631aa
| 75,984
|
from typing import List
from typing import Optional
def present_menu(devices: List[dict], saved_device: Optional[dict] = None) -> dict:
    """ Format the menu of selectable microphones and receive selection from user.
    Parameters
    ----------
    devices : List[dict]
        Device logs for detected microphones
    saved_device : Optional[dict]
        The device log for the saved device - used to indicate on
        the menu the current saved device.
    Returns
    -------
    dict
        The device log for the user-selected device."""
    # Print the menu; a previously saved device is marked with an asterisk.
    for menu_index, device in enumerate(devices):
        is_saved_selection = (
            saved_device is not None
            and int(saved_device['index']) == device['index']
        )
        marker = '*' if is_saved_selection else ' '
        print('{}) {} {}'.format(str(menu_index), marker, device['name']))
    # Re-prompt until the user enters a valid menu index.
    while True:
        menu_input = input(" >> ").strip()
        # BUG FIX: the upper bound must be exclusive. The original compared
        # `selection <= menuIndex` where menuIndex == len(devices), so typing
        # the number one past the last entry raised IndexError.
        if menu_input.isdecimal() and int(menu_input) < len(devices):
            return devices[int(menu_input)]
        print("invalid input")
|
fa773257092a5c487b51badd5fa027b8e9cf7943
| 75,989
|
def get_style_version(request):
    """Return a string with the 'style' for the app.

    - 'lite' means to remove headers and other information.
    - 'standard' is the standard version with the title and info.
    - anything else adds the header bar and Plausible script
      (this is the default option).
    """
    style = request.environ.get("HTTP_X_APP_STYLE", "")
    return style
|
bfc98524e18b48358ec1c8b22421f01684830e49
| 75,991
|
import shutil
def test_output_folder(project_root_path):
    """(Re)make a clean output folder for the tests
    at the beginning of each test session."""
    folder = project_root_path / 'mfexport/tests/tmp'
    # Wipe any leftovers from a previous session before recreating.
    if folder.is_dir():
        shutil.rmtree(folder)
    folder.mkdir(parents=True)
    return folder
|
21ea843921e71b2b9817060d5b27f81e6fc07616
| 75,995
|
def _timedeltaToSeconds(offset):
"""
Convert a datetime.timedelta instance to simply a number of seconds.
For example, you can specify purely second intervals with timedelta's
constructor:
>>> td = datetime.timedelta(seconds=99999999)
but then you can't get them out again:
>>> td.seconds
35199
This allows you to:
>>> import epsilon.extime
>>> epsilon.extime._timedeltaToSeconds(td)
99999999.0
@param offset: a L{datetime.timedelta} representing an interval that we
wish to know the total number of seconds for.
@return: a number of seconds
@rtype: float
"""
return ((offset.days * 60*60*24) +
(offset.seconds) +
(offset.microseconds * 1e-6))
|
7c53118304373983de6ec67cc621e2f022432e9f
| 76,009
|
def average(A_matrices, B_vectors, timesteps):
    """
    Average matrices and vectors over the given timesteps.

    Arguments:
        A_matrices: a dictionary of NxN matrices, indexed with their timestep.
        B_vectors: a dictionary of vectors with len N, indexed with their timestep.
        timesteps: a list or tuple of timesteps.
    Returns:
        A: the average of all A_matrices.
        B: the average of all B_vectors.
    """
    # Seed zero-valued accumulators with the same type/shape as the stored objects.
    seed = list(B_vectors.keys())[0]
    A = A_matrices[seed] * 0
    B = B_vectors[seed] * 0
    # Sum over the requested timesteps, then divide by their count.
    for step in timesteps:
        A += A_matrices[step]
        B += B_vectors[step]
    n = len(timesteps)
    A /= n
    B /= n
    return A, B
|
0d048bfedadf63698b4a3098e2482dc1f8d459a4
| 76,011
|
def _breadth_first_search(g, v):
"""
Return, for each node u, the breadth first search distance from v to u in the graph g.
For each node u, returns the distance of a shortest path from v to u in an undirected unweighted graph using the
breadth first search algorithm.
:param g: an undirected graph
:param v: a node of g
:return: a dictionary associating for each node u the distance of a shortest path from v to u in g
"""
to_visit = [v]
visited = set()
dist = {v: 0}
while len(to_visit) != 0:
u = to_visit.pop(0)
visited.add(u)
for v in u.neighbors:
if v in visited:
continue
if v not in dist or dist[v] > dist[u] + 1:
dist[v] = dist[u] + 1
to_visit.append(v)
return dist
|
0329001e68dafaa7e9e91a39a62fb0d4a52ffeca
| 76,012
|
def read_gmt_multisegment_latlon(input_file, split_delimiter=' '):
    """
    Generalized GMT multisegment file reader.
    Returns lon and lat in a list of lists, each element with a single segment.

    :param input_file: name of input file
    :type input_file: string
    :param split_delimiter: delimiter between values on the same line, defaults to space
    :type split_delimiter: string, optional
    :returns: list of lons, list of lats
    :rtype: list
    """
    print("reading gmt multisegment file %s" % input_file)
    lon_collection, lat_collection = [], []
    lon_temp, lat_temp = [], []
    # BUG FIX: use a context manager so the file handle is closed
    # (the original opened the file and never closed it).
    with open(input_file) as ifile:
        for line in ifile:
            fields = line.split()
            if not fields:
                continue  # skip blank lines instead of raising IndexError
            if fields[0] in ('>>', '>'):
                # Segment header: flush the segment collected so far.
                if lon_temp:
                    lon_collection.append(lon_temp)
                    lat_collection.append(lat_temp)
                lon_temp, lat_temp = [], []
                continue
            values = line.split(split_delimiter)
            lon_temp.append(float(values[0]))
            lat_temp.append(float(values[1]))
    # Flush the final segment.
    lon_collection.append(lon_temp)
    lat_collection.append(lat_temp)
    return lon_collection, lat_collection
|
b88afb943710222215bd9f27c875153f2954d359
| 76,014
|
from datetime import datetime
def main(config):
    """
    Principal function to get health.

    :param config: json with the application configuration
    :return: dict with status
    """
    return {
        "app_name": config.get("app_name"),
        "statusCode": 200,
        "message": "ALIVE",
        "timestamp": str(datetime.now()),
    }
|
f48769a11e9a0528a3ab0de0a332cbdb9f3fb61b
| 76,018
|
import logging
def MpiVars(vm) -> str:
    """Returns the path to the mpivars.sh file.

    With different versions of Intel software installed the mpivars.sh for
    2019.6 can be under compilers_and_libraries_2020.0.166 while the symlink
    for compilers_and_libraries points to compilers_and_libraries_2018.

    Args:
      vm: Virtual machine to look for mpivars.sh on.
    """
    stdout, _ = vm.RemoteCommand('readlink -f /opt/intel/compilers_and_libraries*/'
                                 'linux/mpi/intel64/bin/mpivars.sh | sort | uniq')
    paths = stdout.splitlines()
    if not paths:
        raise ValueError('Could not find the mpivars.sh file')
    if len(paths) > 1:
        logging.info('More than 1 mpivars.sh found, returning first: %s', paths)
    return paths[0]
|
6a82abedd0492455e82e3dedb70d2d8ce7c55132
| 76,020
|
def _estimator_has(attr):
"""Checks if the model has a given attribute.
Meant to be used along with `sklearn.utils.metaestimators.available_if`
Parameters
----------
attr : str
The attribute to check the calling object for
Returns
-------
fn : callable
A function that will either raise an `AttributeError` if the attribute
does not exist, or True if it does.
"""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self, attr)
return True
return check
|
487d2a5e2bb9ca92927aa7855fecea45ca2d20c7
| 76,022
|
import math
from functools import reduce
def lcm(numbers_list):
    """Return the lowest common multiple of all values in ``numbers_list``."""
    def _pair_lcm(x, y):
        x, y = int(x), int(y)
        return (x * y) // math.gcd(x, y)
    return reduce(_pair_lcm, numbers_list, 1)
|
8991ab26984f82f201454f3b26376e3ec8b1085f
| 76,028
|
def set_iscsi_discovery_auth(
        client,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None,
        chap_group=None):
    """Set CHAP authentication for discovery service.

    Args:
        client: RPC client used to issue the call.
        disable_chap: CHAP for discovery session should be disabled (optional)
        require_chap: CHAP for discovery session should be required (optional)
        mutual_chap: CHAP for discovery session should be mutual (optional)
        chap_group: Authentication group ID for discovery session (optional)
    Returns:
        True or False
    """
    params = {}
    # BUG FIX: compare against None rather than truthiness so that explicit
    # False / 0 values are still forwarded to the RPC call (the original
    # silently dropped e.g. disable_chap=False and chap_group=0).
    if disable_chap is not None:
        params['disable_chap'] = disable_chap
    if require_chap is not None:
        params['require_chap'] = require_chap
    if mutual_chap is not None:
        params['mutual_chap'] = mutual_chap
    if chap_group is not None:
        params['chap_group'] = chap_group
    return client.call('set_iscsi_discovery_auth', params)
|
1ef91da9afb4df7c16a55b5b706ccae3b4cb450f
| 76,029
|
def sort_keywords(keywords):
    """Sort keywords in the proper order: i.e. glob-arches, arch, prefix-arches."""
    def _key(keyword):
        # Strip stability markers, then sort primarily by the part after the
        # first dash (empty for plain arches) and secondarily by the arch.
        arch, _, suffix = keyword.lstrip('~-').partition('-')
        return suffix, arch
    return sorted(keywords, key=_key)
|
83e3af245e2fa7d4c48d3f3edbe7df7a2705d403
| 76,030
|
def diagonal(mat, diag_index):
    """Returns ith (wrapped) diagonal of a square matrix, where i is diag_index.

    Returns the diagonal (A_0i, A_1(i+1), ..., A_N(i-1)) of matrix A, with
    column indices taken modulo the matrix size.

    Args:
        mat (2-D list): Matrix.
        diag_index (int): Index of diagonal to return.
    Returns:
        Diagonal of the matrix as a list.
    """
    n = len(mat)
    result = []
    for row in range(n):
        result.append(mat[row % n][(diag_index + row) % n])
    return result
|
baa0717254a5cef6972b0c18942ca111e54c5949
| 76,032
|
def import_file_section(file, start_token, end_token):
    """
    Load a section of comma-separated lines between two tokens.

    Parameters
    ----------
    file: str or pathlib Path
        Path to the file.
    start_token: str
        String in line to start reading file from.
    end_token:
        String in line to end reading file from.
    Returns
    -------
    c_set: list
    """
    def _split_line(raw):
        # Drop the trailing newline, split on commas, discard empty fields.
        return [field for field in raw.strip("\n").split(",") if field != ""]

    readstate = False
    c_set = []
    with open(file, "r", encoding="latin-1") as f:
        for line in f:
            if start_token in line:
                readstate = True
                # BUG FIX: next(f) raised StopIteration when the start token
                # sat on the file's last line; default to '' instead.
                line = next(f, "")
            if end_token in line:
                readstate = False
            if readstate:
                c_set.append(_split_line(line))
    return c_set
|
167d86fdc518b01d3a1145c42fe8882b60003e2a
| 76,033
|
def hour_to_sec(hours: float) -> float:
    """Convert a duration expressed in hours to seconds."""
    seconds_per_hour = 3600
    return seconds_per_hour * hours
|
ce9cb3eccfc6e199074b28a57cc8e913dd2a3f51
| 76,034
|
from pathlib import Path
def is_within(file: Path, excluded: Path) -> bool:
    """Check if `file` is `excluded` itself or lies within `excluded`'s tree."""
    if file == excluded:
        return True
    return excluded in file.parents
|
8f20be21a833bddc3b2fa569e823cbe57b15ffd5
| 76,036
|
def cadence(a, b, required_gap, start):
    """
    For the pair of numbers determine when they first repeat with the
    required gap, and the period at which it repeats, starting at ``start``.

    >>> cadence(67, 7, 1, 0)
    (335, 469)
    >>> cadence(67, 7, 2, 0)
    (201, 469)
    >>> cadence(17, 13, 21, 0)
    (187, 221)
    """
    first_hit = None
    value = start
    # Step through multiples of `a`; record the first and second values where
    # value + required_gap is divisible by `b`, giving offset and period.
    while True:
        if (value + required_gap) % b == 0:
            if first_hit is None:
                first_hit = value
            else:
                return (first_hit, value - first_hit)
        value += a
|
246b2a38c6354d4178e881ffea0ac63084f5b1df
| 76,037
|
def parse_evidence(evidence):
    """
    Parse an evidence string into a dict mapping observation -> count.

    A zero-coverage 'N' call (empty evidence string) is encoded as N = -1.

    :param evidence: evidence string, e.g. "Ax27 AGCAx1"
    """
    tokens = evidence.split(' ')
    if tokens == ['']:
        return {'N': -1}
    obs_count = {}
    for token in tokens:
        obs, count = token.split('x')
        obs_count[obs] = int(count)
    return obs_count
|
53132481dc1f75d1ba9c234cf35581a4b502afc5
| 76,038
|
def year_list(x):
    """Return the elements of x that can be cast to year (int)."""
    years = []
    for item in x:
        try:
            int(item)  # valid year
        except ValueError:
            continue
        years.append(item)
    return years
|
73e9e07f70850d0cb2edf9c04861db1264ee590a
| 76,044
|
def compute_ema_from_list(ema0, y_values, alpha=0.1):
    """Advance an EMA over a batch: ema_t+n = (1-α)^n*ema + αΣ(1-α)^(n-i)*y_i"""
    assert isinstance(y_values, list), "y_values has to be a list"
    n = len(y_values)
    decay = 1 - alpha
    weighted_sum = sum(y_val * decay ** (n - 1 - i) for i, y_val in enumerate(y_values))
    return decay ** n * ema0 + alpha * weighted_sum
|
ef5663708da308d2acd6a4de1a3b07891618fbf6
| 76,045
|
from pathlib import Path
def bench_name(map_file, scen_file) -> str:
    """Name the benchmark '<map stem>-<scenario stem>' (file names without suffixes)."""
    map_stem = Path(map_file).stem
    scen_stem = Path(scen_file).stem
    return f"{map_stem}-{scen_stem}"
|
bbeb5dc4700e48ed0166a295da85a676010eaad0
| 76,050
|
def istmp(name):
    """Is this the name of a tmp variable (i.e. prefixed with "tmp")?"""
    prefix = "tmp"
    return name.startswith(prefix)
|
45eee9ab7789641aac5b24c7cecb6ead19693477
| 76,052
|
import shutil
def print_full_width(char, mid_text="", whitespace=" ", ret_str=False, **kwargs):
    """
    Prints a character at the full terminal width. If ``mid_text`` is supplied,
    this text is printed in the middle of the terminal, surrounded by
    ``whitespace``. Additional kwargs passed to ``print``.

    If ``ret_str`` is true, the string is returned instead of printed.
    """
    cols, _ = shutil.get_terminal_size()
    if not mid_text:
        out = char * cols
    else:
        fill = cols - len(mid_text) - 2 * len(whitespace)
        if fill <= 0:
            fill = 2  # always keep at least one char on each side
        left_n = fill // 2
        right_n = fill // 2 + (fill % 2)
        out = char * left_n + whitespace + mid_text + whitespace + char * right_n
    if ret_str:
        return out
    print(out, **kwargs)
|
04ed511123609a44a235b550d195320c639f14c4
| 76,055
|
def center(size, fit_size, offset):
    """
    Center a given area within another area at an offset.

    Arguments:
        size: (width, height) of the area to be centered.
        fit_size: (width, height) of the area in which to center 'size'.
        offset: (x, y) coordinate of the offset.
    """
    width, height = size
    fit_w, fit_h = fit_size
    off_x, off_y = offset
    cx = off_x + (fit_w - width) // 2
    cy = off_y + (fit_h - height) // 2
    return cx, cy
|
6406d9e26eccb31d01e507b813a81fa26ac84f5c
| 76,058
|
def delist(list_obj):
    """
    Returns a copy of `list_obj` with all empty lists and tuples removed.

    Parameters
    ----------
    list_obj : list
        A list object whose empty list/tuple elements should be removed.

    Returns
    -------
    delisted_copy : list
        Copy of `list_obj` with all empty lists/tuples removed.
    """
    if type(list_obj) != list:
        raise TypeError("Input argument 'list_obj' is not of type 'list'!")

    def _is_empty_container(element):
        return ((isinstance(element, list) and element == []) or
                (isinstance(element, tuple) and element == ()))

    return [element for element in list_obj if not _is_empty_container(element)]
|
a259e703383ac89e2274956288458bbe851fa225
| 76,059
|
import requests
def hook_exists(full_name, token):
    """ Return True if a hook for the repository is listed on travis. """
    headers = {
        'Authorization': 'token %s' % token,
    }
    response = requests.get(
        'http://api.travis-ci.org/hooks', headers=headers
    )
    owner, name = full_name.split('/')
    if response.status_code != 200:
        return False
    matches = [
        repo for repo in response.json()
        if repo['name'] == name and repo['owner_name'] == owner
    ]
    return len(matches) > 0
|
12cc41a5ae3870ab9e71d8b583e21708e80e07b7
| 76,060
|
def CreateMnemonicsPython(mnemonicsIds):
    """ Create the opcodes dictionary for Python. """
    text = "Mnemonics = {\n"
    for name in mnemonicsIds:
        text += "0x%x: \"%s\", " % (mnemonicsIds[name], name)
        # Wrap once the current output line reaches ~76 columns
        # (swap the trailing space for a newline).
        if len(text) - text.rfind("\n") >= 76:
            text = text[:-1] + "\n"
    # Remove the trailing comma/space added after the final entry.
    text = text[:-2]
    if text[-1] != "\n":
        text += "\n"
    # Return the mnemonics dictionary only.
    return text + "}"
|
4aab5f65413e8f2841d52a73b3e3a28396c867ec
| 76,071
|
import copy
import warnings
def get_list_values_larger_than_ref(list_val, ref_val):
    """
    Returns the values in list_val which are larger than or equal to ref_val.

    Parameters
    ----------
    list_val : list (of floats)
        Value list
    ref_val : float
        Reference value, which defines the lower bound for the search

    Returns
    -------
    list_larger : list (of floats)
        All values of list_val that are >= ref_val; if none qualify, a
        warning is issued and the largest value of list_val is returned.
    """
    list_larger = [val for val in list_val if val >= ref_val]
    if not list_larger:
        msg = 'list_larger is empty list. Thus, going to use largest values' \
              ' of original list list_val.'
        warnings.warn(msg)
        list_larger.append(max(list_val))
    return list_larger
|
3cdbd985e58ed67309a88681bacc7773e92b4a97
| 76,072
|
import re
def count_lines_with_wrapping(s, linewidth=80):
    """Count the number of lines in a given string.

    Lines are counted as if the string was wrapped so that lines are never over
    linewidth characters long.
    Tabs are considered tabwidth characters long.

    Args:
        s (str): The text to measure.
        linewidth (int): Maximum number of characters per displayed line.

    Returns:
        int: The number of display lines the text occupies after wrapping.
    """
    tabwidth = 8  # Currently always true in Shell.
    pos = 0
    linecount = 1
    current_column = 0
    # Scan tab/newline positions; plain characters between them only advance
    # the current column by their count.
    for m in re.finditer(r"[\t\n]", s):
        # Process the normal chars up to tab or newline.
        numchars = m.start() - pos
        pos += numchars
        current_column += numchars
        # Deal with tab or newline.
        if s[pos] == '\n':
            # Avoid the `current_column == 0` edge-case, and while we're
            # at it, don't bother adding 0.
            if current_column > linewidth:
                # If the current column was exactly linewidth, divmod
                # would give (1,0), even though a new line hadn't yet
                # been started. The same is true if length is any exact
                # multiple of linewidth. Therefore, subtract 1 before
                # dividing a non-empty line.
                linecount += (current_column - 1) // linewidth
            linecount += 1
            current_column = 0
        else:
            assert s[pos] == '\t'
            # Tabs advance to the next multiple of tabwidth columns.
            current_column += tabwidth - (current_column % tabwidth)
            # If a tab passes the end of the line, consider the entire
            # tab as being on the next line.
            if current_column > linewidth:
                linecount += 1
                current_column = tabwidth
        pos += 1  # After the tab or newline.
    # Process remaining chars (no more tabs or newlines).
    current_column += len(s) - pos
    # Avoid divmod(-1, linewidth).
    if current_column > 0:
        linecount += (current_column - 1) // linewidth
    else:
        # Text ended with newline; don't count an extra line after it.
        linecount -= 1
    return linecount
|
821654d00ae65153a57a6dbca25fd666616147ac
| 76,083
|
def _get_sequence_from_coords(len_prots, proteome_sequence,
prot, start, end):
"""
Gets the amino acid sequence at specified coordinates.
Parameters
----------
len_prots : dict
A dictionary mapping protein names to lengths.
proteome_sequence : function
A closure that returns the sequence at given coordinates.
prot : str
The name of a protein, e.g. "YFP".
start : int
The 0-based start coordinate of the first position in the
sequence.
end : int
One past the 0-based last position in the sequence.
Returns
-------
str
The amino acid sequence.
"""
if start > len_prots[prot] or (end > len_prots[prot] + 1) or start < 0:
return ""
return proteome_sequence(prot, start, end)
|
26c62d63adb964e296b623df773046e56de247a1
| 76,092
|
def has_extension(conn, name):
    """Checks if the postgres database has a certain extension installed"""
    query = "SELECT EXISTS(SELECT TRUE FROM pg_extension WHERE extname = %s)"
    return conn.execute(query, (name,)).scalar()
|
6ef0c54d7b520db7e61c602c1b1683cf6310410a
| 76,096
|
def accumulate_demand_sequence(demand_sequence: list) -> dict:
    """
    Accumulate (src, dst, demand_size) tuples into a demand matrix.

    :param demand_sequence: a list of tuples with [(src, dst, demand_size)]
    :return demand_matrix: accumulated demand matrix with {(src, dst): demand_size}
    """
    demand_matrix = {}
    for src, dst, demand_size in demand_sequence:
        key = (src, dst)
        demand_matrix[key] = demand_matrix.get(key, 0) + demand_size
    return demand_matrix
|
d4f8dc1ce73851730b83a994e9e15c44582a1d81
| 76,097
|
def find_children(words, word):
    """Find all children of a word, i.e. words that are revealed
    by dropping a single letter.

    words: dictionary of words
    word: string
    """
    candidates = (word[:i] + word[i + 1:] for i in range(len(word)))
    return [child for child in candidates if child in words]
|
6790f12b18de2dd700bce7e1c0cddb09b588b4a0
| 76,099
|
import re
def uncamel(s):
    """
    Convert CamelCase class names into lower_snake_case.
    Taken from http://stackoverflow.com/a/1176023/3288364
    """
    snaked = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s)
    return snaked.lower()
|
21990e0df7d590e03d8b9ea61bb2bb4ef4b6e6d6
| 76,100
|
def list_to_pairs(l):
    """
    Expand each sublist into all of its ordered index pairs.

    >>> l = [[1, 2, 3], [45, 46]]
    >>> list_to_pairs(l)
    [[1, 2], [1, 3], [2, 3], [45, 46]]

    :param l: list of lists
    :return: list of two-element lists
    """
    return [
        [group[j], group[k]]
        for group in l
        for j in range(len(group) - 1)
        for k in range(j + 1, len(group))
    ]
|
e417c1f27f7e2849df673c719cf6ea1f408612c6
| 76,101
|
import operator
def between(min_value, value, max_value, partial_order=operator.le):
    """Check if Value is between Min and Max under the given partial order
    (inclusive by default)."""
    lower_ok = partial_order(min_value, value)
    upper_ok = partial_order(value, max_value)
    return lower_ok and upper_ok
|
3e2d61a510397949798b311d5821e5ce0a19df34
| 76,103
|
import json
def parse_user_parameters(params_str):
    """Parses a given UserParameters string.

    A UserParameters string must be a JSON object.

    :type params_str: str
    :param params_str: UserParameters string to be parsed.
    :rtype: dict
    :return: parsed user parameters.
    :raises JSONDecodeError: if ``params_str`` is not a valid JSON text.
    :raises ValueError: if ``params_str`` is not a JSON object.
    """
    params = json.loads(params_str)
    if isinstance(params, dict):
        return params
    raise ValueError(f'UserParameters must be a JSON object not {type(params)}')
|
2e464aba708b2249323c3eec4855d72e8900eac3
| 76,104
|
def calc_triangle_area(base, height):
    """
    Calculate the area of a triangle from its base and height.

    :param base: base length
    :param height: height
    :return: area
    """
    product = base * height
    return product / 2
|
89c74c3ed9629f3459604c57a2c15a0a681790ff
| 76,109
|
def clean_url(url: str) -> str:
    """
    Removes unwanted characters from the end of certain URLs.

    Args:
        url (:obj:`str`):
            A URL.

    Returns:
        :obj:`str`:
            The URL without unwanted characters at the end.
    """
    # Site-specific (predicate, right-split separator) cleanup rules.
    rules = (
        (lambda u: 'achgut' in u and '/P' in u, '/P'),
        (lambda u: 'focus' in u, '_id_'),
        (lambda u: 'sueddeutsche' in u and '1.508' in u, '-'),
        (lambda u: 'opposition24' in u, '/?'),
    )
    for matches, separator in rules:
        if matches(url):
            return url.rsplit(separator, 1)[0]
    return url
|
1ec2bc66dbc9915cd8a7f6ef9e4a0818c4171ca4
| 76,115
|
def creator_instrument(lower, upper):
    """
    Generates the approximated instrument string based on the frequencies,
    to use directly with CallistoSpectrogram. Returns None when no known
    instrument covers the range.
    """
    lower, upper = int(lower), int(upper)
    if 1200 <= lower and upper <= 1800:
        return "BLEN5M"
    if 110 <= lower and upper <= 870:
        return "BLEN7M"
    # The upper value is set to 110 in order to download a bigger wide of flares
    if 20 <= lower and upper <= 110:
        return "BLENSW"
    return None
|
d6687d30be0f0ec2bd7ca1a64b4938400bde9ed0
| 76,119
|
def getErrorPair(model, pair):
    """
    Squared error for a point pair under a pure-translation model:
    error = ||p' - (p + t)||^2 with translation t = (model[0], model[1]).
    """
    tx = model[0]
    ty = model[1]
    p, pprime = pair[0], pair[1]
    xdiff = (p[0] + tx) - pprime[0]
    ydiff = (p[1] + ty) - pprime[1]
    return xdiff ** 2 + ydiff ** 2
|
e50f995daad306ce2bb2a521460145e9fbfb4303
| 76,123
|
def resize_image(image, size=None, fx=1.0, fy=1.0):
    """Resize image to ``size``; when ``size`` is None, scale the current
    dimensions by the factors ``fx`` and ``fy`` instead."""
    if size is not None:
        return image.resize(size)
    scaled = (int(fx * image.size[0]), int(fy * image.size[1]))
    return image.resize(scaled)
|
d361f5d4d54e2b90d4e5ecb8838b70b7f56937d8
| 76,129
|
def remove_self(dictionary):
    """Remove the 'self' entry from a dictionary (in place).

    This is useful when passing a dictionary created with locals() as kwargs.

    Args:
        dictionary: Dictionary containing a 'self' key
    Returns:
        The same dictionary without the 'self' key
    """
    dictionary.pop('self')
    return dictionary
|
e4e4b827bf993a4089d2cb116455b3cdd330f4ac
| 76,130
|
def _proteinTagPresent(fastaHeader, tag):
"""Checks wheter a tag string is present in the fastaHeader.
:param fastaHeader: str, protein entry header from a fasta file
:returns: bool, True if tag is present in fastaHeader
"""
return (tag in fastaHeader)
|
eb732c3fe0d0d3a09a74b0d4706bf950e5606509
| 76,131
|
from typing import Counter
def in_degrees_by_uid(conv):
    """
    Returns a Counter of post IDs mapping to the # of replies that post
    received in this Conversation.

    Parameters
    ----------
    conv : Conversation
        A collection of posts

    Returns
    -------
    Counter
        A mapping from post IDs to the # of replies they receive in `conv`
    """
    counts = Counter()
    for post in conv.posts.values():
        # Ensure every author uid appears even with zero replies.
        counts.setdefault(post.uid, 0)
        for parent in post.reply_to:
            if parent in conv.posts:
                counts[parent] += 1
    return counts
|
251b63874a55a8f9f3daece033c036b1b261cd53
| 76,132
|
def AxisAng3(expc3):
    """Converts a 3-vector of exponential coordinates for rotation into
    axis-angle form.

    :param expc3: A 3-vector of exponential coordinates for rotation
        (must provide ``normalized()`` and ``norm()`` methods)
    :return omghat: A unit rotation axis
    :return theta: The corresponding rotation angle
    """
    omghat = expc3.normalized()
    theta = expc3.norm()
    return (omghat, theta)
|
5c554bddc6eec2d837e87f5fa140e03d793a448a
| 76,134
|
def _add_padding(byte_list, intended_length, padding_byte=0x00):
"""
Pads a list with 0's so that it reaches the intended length.
"""
for i in range(intended_length - len(byte_list)):
byte_list.append(padding_byte)
return byte_list
|
e742e19f0f4ef69fbdf381c9c52969f9e17347ec
| 76,138
|
def should_load_from_existing_db(database_loader, cursor, *, config):
    """
    Decide whether a backup database should be loaded to begin test execution.

    A backup db is used when that backup exists, and the pytest config options
    don't indicate that the database should be freshly created for the run.

    Args:
        database_loader (DatabaseLoader): A DatabaseLoader instance
        cursor (django.db.connection.cursor): A database cursor
        config (Config): The pytest configuration
    Returns:
        bool: Whether a backup database should be loaded to begin test execution
    """
    opts = config.option
    # Reuse requested, no forced re-create, and a backup actually exists.
    return (
        opts.reuse_db and
        not opts.create_db and
        database_loader.has_backup(db_cursor=cursor)
    )
|
975eccafe56935f66ca3e89aa7d91aad4dff191f
| 76,139
|
def cycle_list(k, n):
    """
    Returns the elements of the list ``range(n)`` shifted to the
    left by ``k`` (so the list starts with ``k`` (mod ``n``)).

    Examples
    ========
    >>> from sympy.crypto.crypto import cycle_list
    >>> cycle_list(3, 10)
    [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
    """
    shift = k % n
    return [(shift + i) % n for i in range(n)]
|
0be03fd6e1a11c656b632161a3cd302bb88b769b
| 76,140
|
def GetHealthChecks(args, resource_parser):
    """Returns health check URIs from the parsed command-line arguments."""
    if not args.http_health_checks:
        return []
    refs = resource_parser.CreateGlobalReferences(
        args.http_health_checks, resource_type='httpHealthChecks')
    return [ref.SelfLink() for ref in refs]
|
38b8ab7ff6c409f2838b21967d19e2fccc234daf
| 76,144
|
def get_dataset(training_info_txt: str) -> str:
    """Read the dataset name from the first line of training_info.txt.

    Args:
        training_info_txt (str): path to training_info.txt in checkpoints dir
    Returns:
        str: dataset name
    """
    with open(training_info_txt, "r") as f:
        first_line = f.read().split("\n")[0]
    # First line looks like "dataset: <name>"; strip the label and spaces.
    return first_line.split(":")[-1].replace(" ", "")
|
778eba4f0666cb8879d626cf7a411764a98f5da5
| 76,147
|
def _difference_in_years(start, end):
""" calculate the number of years between two dates """
diff = end - start
diff_in_years = (diff.days + diff.seconds/86400)/365.2425
return diff_in_years
|
78ed24bcb2863a7150748ef557af86aec3358554
| 76,148
|
def phase_structure(phase_a, phase_b, axis=-1):
    """Structure function for phase fluctuations.

    See Daigle, equation 18. Computed as the variance of the phase
    difference along ``axis``; the subtracted term accounts for the fact
    that the mean phase difference of the signals may be nonzero.
    """
    delta = phase_a - phase_b
    return (delta ** 2.0).mean(axis=axis) - delta.mean(axis=axis) ** 2.0
|
c402b34ea7b682c81c33516c896e67ecbaf41740
| 76,149
|
def parse_exif_string(exif_tag):
    """
    Parse the given exif tag as a string and return its value.

    Parameters
    ----------
    exif_tag : ExifTag
        Exif tag to parse; its ``value`` attribute is read.

    Returns
    -------
    str
        The tag's value coerced to ``str``.
    """
    value = exif_tag.value
    return str(value)
|
db95e0eede84aa253ded122f0667aca04e038403
| 76,153
|
def _toOperation(info, resource, handler):
"""
Augment route info, returning a Swagger-compatible operation description.
"""
operation = dict(info)
operation['tags'] = [resource]
# Operation Object spec:
# Unique string used to identify the operation. The id MUST be unique among
# all operations described in the API.
if 'operationId' not in operation:
operation['operationId'] = str(resource) + '_' + handler.__name__
return operation
|
a7e173627677d2087f7615f78ae64dd903935da6
| 76,156
|
def popall(multidict, key):
    """Pop and return all values stored under ``key`` in ``multidict``.

    When the key holds no values, nothing is removed and None is returned.
    """
    values = multidict.getall(key)
    if not values:
        return None
    del multidict[key]
    return values
|
8eed9cee0ac6b66a1bf9c642d330cd715b1e8b57
| 76,158
|
def collection_keys(coll, sep='.'):
    """Get a list of all (including nested) keys in a collection.

    Examines only the first document in the collection, walking nested
    dicts depth-first and joining key paths with ``sep``.

    :param sep: Separator for nested keys
    :return: List of str
    """
    def _walk(doc, prefix=''):
        found = []
        for key, value in doc.items():
            full_key = prefix + key
            found.append(full_key)
            if isinstance(value, dict):
                found.extend(_walk(value, full_key + sep))
        return found
    return _walk(coll.find_one())
|
7ac4b59865f97a92a0da31746843cc5a55f94821
| 76,159
|
def authenticate(username, password):
    """
    Return the user payload (dict) when the login is valid, otherwise False.
    """
    # NOTE(review): hard-coded test/test credentials — looks like a stub.
    is_valid = (username, password) == ("test", "test")
    return {"username": username} if is_valid else False
|
184a34aad8b475db68bff3357a00228410865cff
| 76,162
|
from typing import List
def _parse_experience(experience: str) -> List[int]:
"""
Convert a string representation of a list of integers to a proper list of
ints.
"""
if not experience:
return list()
return [int(x) for x in experience.rstrip(",").split(",")]
|
958cb94afef1460c0fbb6eece6032db7b0d00e02
| 76,166
|
import inspect
import warnings
def _sort_key(test):
"""Accepts test method, returns module name and line number."""
method = getattr(test, test._testMethodName)
while hasattr(method, '_wrapped'): # If object is wrapped with a
method = method._wrapped # decorator, unwrap it.
try:
lineno = inspect.getsourcelines(method)[1]
except IOError:
warnings.warn('Unable to sort {0}'.format(method))
lineno = 0
return (method.__module__, lineno)
|
91aea49068c8011a06d2ce064e56b120067da7fe
| 76,168
|
from datetime import datetime
import click
def extract_simulation_times(logfile):
    """Returns a newline-separated string of (start, elapsed) times.

    Starting timestamps are taken from log lines that launched a
    Singularity/Docker container; each elapsed time is the difference
    between consecutive starts.  The first line is a header row.
    """
    def is_simulation_start(line: str):
        """True when the log line issued a Singularity/Docker run-command."""
        lowered = line.lower()
        return 'singularity run' in lowered or 'docker run' in lowered

    def to_timestamp(line):
        """Parse the leading timestamp (text before the comma) of a line."""
        time_stamp_format = '%Y-%m-%d %H:%M:%S'
        return datetime.strptime(line[:line.find(',')], time_stamp_format)

    # `click.open_file` (rather than plain `open`) supports passing a dash
    # (`-`) as the filename specifier to read from `stdin`.
    with click.open_file(logfile, 'r') as log:
        starts = [to_timestamp(line) for line in log if is_simulation_start(line)]
    timings = [f'{starts[0]}\tElapsed (HH:MM:SS)']
    timings.extend(f'{start}\t{stop - start}'
                   for start, stop in zip(starts, starts[1:]))
    return "\n".join(timings)
|
de32f3f82e39ae2fd05f6f23df21ce577ea31eae
| 76,171
|
import math
def scale_sizes(size_dict, dimensions=1):
"""Normalizes a list of sizes so the largest is 1.0.
Use `dimensions` if the sizes are non-linear, i.e. 2 for scaling area.
"""
# x -> (x/max)^(1/dimensions)
max_size = float(max(size_dict.values()))
scaled_sizes = dict()
for k, v in size_dict.items():
scaled_sizes[k] = math.pow(v / max_size, 1.0 / dimensions)
return scaled_sizes
|
75a9290261d40686722bdae137c5f37847807065
| 76,172
|
import re
def get_train_dataset_name(config_train_name: str, shot: int, seed: int) -> str:
    """Return ``config_train_name`` rewritten with the given shot and seed.

    Args:
        config_train_name: Train dataset name from the config; must contain
            the template marker ``'1shot'``.
        shot: Desired shot count; replaces the ``1shot`` marker.
        seed: Desired seed; replaces an existing ``seedX`` token or is
            appended as ``_seed{seed}``.

    Returns:
        The modified dataset name.

    Raises:
        AssertionError: If the name lacks the ``'1shot'`` marker or holds
            more than one ``seedX`` token.
    """
    # Rewrite the shot count.  Only '1shot'-templated names are accepted;
    # NOTE(review): a name already carrying f'{shot}shot' is deliberately
    # rejected too (the original's commented-out `pass` suggests this was
    # debated) — confirm before relaxing.
    if '1shot' in config_train_name:
        config_train_name = config_train_name.replace('1shot', str(shot) + 'shot')
    elif f'{shot}shot' in config_train_name:
        assert False, (
            f"{config_train_name!r} already uses {shot}shot; "
            "expected a '1shot' template")
    else:
        assert False, f"no shot marker found in {config_train_name!r}"
    # Modify or add the seed number in the name.
    if 'seed' in config_train_name:
        # Replace the current seed digit: name_seedX -> name_seed{seed}.
        assert len(re.findall('seed.', config_train_name)) == 1, (
            f"expected exactly one seed token in {config_train_name!r}")
        config_train_name = re.sub('seed.', 'seed{}'.format(seed), config_train_name)
    else:  # Append seed string
        config_train_name = config_train_name + f'_seed{seed}'
    return config_train_name
|
a0ad0ba4613e13f6d742f67735227b167f3498e0
| 76,175
|
import torch
def tensor_to_image(tensor):
"""
transforms a tensor to an image
Args:
tensor: tensor of size (1, 3, x, y)
Returns: tensor of size(x, y, 3)
"""
x, y = tensor.size()[-2:]
a, b, c = tensor[0]
return torch.cat(
(a.reshape(x, y, 1), b.reshape(x, y, 1), c.reshape(x, y, 1)), 2)
|
355698e3e44afd936a0bc92c8f74f8c7d10bf393
| 76,178
|
from typing import Callable
def search(low: int, high: int, test: Callable[[int], bool]) -> int:
    """Binary search over the half-open interval [low..high).

    ``test`` takes a single value from the interval and returns a truth
    value, and must be ascending: (test(x) and y >= x) => test(y).
    Returns the smallest argument in the interval for which ``test`` is
    true, or ``high`` if it is false on the entire interval.

    The original used an off-center mid ((low+high-1)//2) plus a special
    early-return branch; this is the canonical half-open formulation with
    the same results and a simpler invariant (answer always in [low, high]).
    """
    while low < high:
        mid = (low + high) // 2
        if test(mid):
            # mid satisfies the predicate; the answer is at mid or left of it.
            high = mid
        else:
            # mid fails; the answer must be strictly to the right.
            low = mid + 1
    return low
|
047ee9fbb21461d02299f7d8a27dd37d29757717
| 76,181
|
def is_ipv4(hosts):
    """
    Validate an IPv4 address or subnet string.

    :param hosts: A single host or subnet (ex. "127.0.0.1/24").  A missing
        mask is treated as /32 (single host).
    :return: True when the string is a valid dotted quad with a mask in
        [0, 32], otherwise False.

    Fixes over the original: non-numeric octets/masks (e.g. "a.b.c.d/8")
    now return False instead of raising ValueError, and negative octets
    or masks are rejected.
    """
    hosts = hosts.strip()
    # Assume that if no mask is specified, use a single host mask.
    if "/" not in hosts:
        hosts = hosts + "/32"
    address, _, mask_str = hosts.partition("/")
    octets = address.split(".")
    # A dotted quad has exactly four octets.
    if len(octets) != 4:
        return False
    try:
        mask = int(mask_str)
        values = [int(octet) for octet in octets]
    except ValueError:
        # Non-numeric octet or mask (or extra '/' in the mask part).
        return False
    if not 0 <= mask <= 32:
        return False
    return all(0 <= value <= 255 for value in values)
|
9990949e0faa05d1cbd70fd85cd82e1dc910c1c6
| 76,182
|
def getInputParameter(inputParametersPandas, key):
    """
    Look up the value stored for ``key`` in a two-column parameter dataframe.

    Parameters:
    -----------
    inputParametersPandas : pandas.df
        pandas dataframe with two columns (a 'key' column and a value column)
    key : string
        key string to locate.

    Returns:
    -----------
    string
        the respective parameter value, stripped of surrounding whitespace
    """
    # Select the matching row(s); the value sits in the second column of
    # the first match.
    matching_rows = inputParametersPandas.loc[inputParametersPandas['key'] == key]
    raw_value = matching_rows.values[0, 1]
    return raw_value.strip()
|
cf87b66e3b3cb18eae0070a0b66636166623fb8c
| 76,184
|
import json
import click
def upload_config(mwdb, family, config_file, config_type, parent, private, public, share_with):
    """Upload config object"""
    # `click.open_file` supports '-' to read the config JSON from stdin.
    with click.open_file(config_file, 'rb') as config_fh:
        cfg = json.load(config_fh)
    uploaded = mwdb.upload_config(
        family=family,
        cfg=cfg,
        config_type=config_type,
        parent=parent,
        private=private,
        public=public,
        share_with=share_with,
    )
    return dict(message="Uploaded config {object_id}",
                object_id=uploaded.id)
|
99a50eecfa68e7850c7d197ab4f60c45e6ec5d0c
| 76,185
|
from typing import Any
def isnan(x: Any) -> bool:
"""
Return True if x is NaN where x can be of any type
:param x: any object for which (in)equality can be checked
"""
return x != x
|
7e352628c7074654a3d32d3de83040e46d3748c1
| 76,191
|
def strings_intersect(s_one, s_two):
    """
    Check whether two strings share at least one character.

    :param s_one: first string
    :type s_one: str
    :param s_two: second string
    :type s_two: str
    :return: whether the strings have any character in common
    :rtype: bool
    """
    return bool(set(s_one) & set(s_two))
|
2324c8560bf517e6e811115f3fc5b5da8cf7eb3b
| 76,192
|
def loadOutputList(expt_name, outputType):
    """Load the list of output-data filenames for an experiment.

    Args:
        expt_name (String): Name of experiment (which contains however many
            simulation output files under ``data/<expt_name>/``)
        outputType (String): 'molpos' or 'simtime'; selects which list of
            output filenames to read.

    Returns:
        TYPE: List of file paths, one per line of the list file.
    """
    list_files = {
        'molpos': 'outputMolposList.txt',
        'simtime': 'outputSimtimeList.txt',
    }
    if outputType not in list_files:
        raise Exception('outputType required to be either \'molpos\' or \'simtime\'')
    path = 'data/' + expt_name + '/' + list_files[outputType]
    with open(path) as list_file:
        return ["data/" + expt_name + "/" + line.rstrip() for line in list_file]
|
22f98f554b7cb6161a893a2f81cc4b916460ccc6
| 76,194
|
def _version_str(version):
"""
Convert a tuple of int's to a '.' separated str.
"""
return ".".join(map(str, version))
|
2c8422d86ef2cf9987a63f31f7c9c63623ed029a
| 76,197
|
def guess_xyz_columns(colnames):
    """
    Given column names in a table, return the columns to use for x/y/z, or
    None/None/None if no high confidence possibilities.

    A guess is high-confidence when, case-insensitively, exactly one column
    equals (first pass) or starts with (second pass) each of 'x', 'y', 'z'.

    The original duplicated the whole matching logic for the exact and
    prefix passes inside a pointless single-iteration loop; both passes now
    share one helper, with identical behavior.
    """
    # Do all the checks in lowercase.
    colnames_lower = [colname.lower() for colname in colnames]

    def _unique_matches(predicate):
        """Return the (x, y, z) matches, or None unless each is unique."""
        picked = []
        for target in ('x', 'y', 'z'):
            hits = [orig for orig, low in zip(colnames, colnames_lower)
                    if predicate(low, target)]
            if len(hits) != 1:
                return None
            picked.append(hits[0])
        return tuple(picked)

    # Check first for exact matches, then for prefix matches.
    for predicate in (lambda low, target: low == target,
                      lambda low, target: low.startswith(target)):
        match = _unique_matches(predicate)
        if match is not None:
            return match
    return None, None, None
|
46c36d67e42d50c7da4e5bd1ea2e73d850f16dae
| 76,199
|
from typing import Dict
from typing import Any
def empty_invoice_data(full_invoice_data: Dict[str, Any]) -> Dict[str, Any]:
    """Returns data for an `Invoice` without items.

    Args:
        full_invoice_data (Dict[str, Any]): Data for an invoice with items.

    Returns:
        Dict[str, Any]: A shallow copy of the input with the "items" key
        removed.  Unlike the original, the caller's dict is left unmodified
        and a missing "items" key no longer raises KeyError.
    """
    # Copy first so the caller's dict is not mutated as a side effect.
    invoice_data = dict(full_invoice_data)
    invoice_data.pop("items", None)
    return invoice_data
|
2d5a41811c465bbfd8a0db79a365be22d4e43124
| 76,201
|
def tryGetListFromDict(d: dict, key: str):
    """
    Get an element from a dict, returning an empty list when absent.

    :param d: dict
    :param key: key for element
    :return: element or empty list
    """
    # dict.get with a fresh-list default is the EAFP-free equivalent of
    # catching KeyError.
    return d.get(key, [])
|
73f1ffd8b13b56b9371e4c66db83f8c8dfa13a3b
| 76,203
|
def arduino_map(x, in_min, in_max, out_min, out_max):
    """Return x mapped from the input range onto the output range.

    Integer floor division mirrors the Arduino `map()` behavior, including
    reversed ranges.

    >>> arduino_map(0, 0, 10, 100, 1000)
    100
    >>> arduino_map(5, 0, 10, 100, 1000)
    550
    >>> arduino_map(10, 0, 10, 100, 1000)
    1000
    >>> arduino_map(0, 10, 0, 100, 1000)
    1000
    >>> arduino_map(128, 0, 255, -100, 100)
    0
    >>> arduino_map(255, 255, 0, 100, -100)
    100
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    return out_min + (x - in_min) * out_span // in_span
|
def5d846b1cde359c9b173f59fd054ef3acf2631
| 76,207
|
def hr_range_formatter(start, end, step):
    """Format a range (sequence) compactly and human-readably from its
    starting number, ending number (inclusive), and step size.

    Notes
    -----
    If `start` and `end` are integers and `step` is 1, the step is
    omitted.  The format is NOT Python slicing syntax: '0-10:2' includes
    both 0 and 10 with step 2, whereas 0:10:2 excludes 10.  Numbers that
    are integer-valued are shown as ints for compactness.

    Examples
    --------
    >>> hr_range_formatter(start=0, end=10, step=1)
    '0-10'
    >>> hr_range_formatter(start=0, end=10, step=2)
    '0-10:2'
    >>> hr_range_formatter(start=0.1, end=3.1, step=1.0)
    '0.1-3.1:1'
    """
    def _as_int(value):
        """Collapse to int when equivalent, for compact display."""
        return int(value) if int(value) == value else value
    start, end, step = _as_int(start), _as_int(end), _as_int(step)
    # After collapsing, integral values are ints; omit a unit step for
    # purely integer ranges.
    if isinstance(start, int) and isinstance(end, int) and step == 1:
        return '{}-{}'.format(start, end)
    return '{}-{}:{}'.format(start, end, step)
|
445a23c9931ef3b4a950227c51c0a4c551cf3c64
| 76,209
|
import math
def next_even_number(val):
"""Returns the next even number after the input. If input is even, returns the same value."""
return math.ceil(val / 2) * 2
|
700334e60c4abfa949524c1825b7ee97dd8f75cd
| 76,213
|
def has_token(node, token):
    """Return True when any of the node's tokens spells ``token``."""
    return any(tok.spelling == token for tok in node.get_tokens())
|
e2e3d6a3c4393b557ab5a8c770ad94f2e5112344
| 76,216
|
import copy
def simplify(mapping, name, **params):
    """
    Return a reduced copy of ``mapping`` named ``name``.

    The input mapping is deep-copied (so the original is untouched),
    renamed, and the reduction is delegated to the copy's own
    ``simplify(**params)`` method.

    Args:
        mapping (Mapping): the mapping to be reduced
        name (str): name assigned to the reduced mapping
        params (**kwargs): forwarded to ``Mapping.simplify``; per that
            method they may include 'k' (desired number of classes),
            'thresholds', 'centroids', 'labels', 'method'
            ('linear'/'kmeans'/'auto'), 'iters', 'label_from',
            'mean_labels' and 'default'.  At most one of 'thresholds',
            'centroids' and 'k' may be given; see ``Mapping.simplify``
            for the full contract and the ValueError conditions it
            raises.

    Returns:
        The reduced mapping produced by ``Mapping.simplify``.
    """
    reduced = copy.deepcopy(mapping)
    reduced.__name__ = name
    return reduced.simplify(**params)
|
5185ab0f0b3d9540f1c6701b1d9cf848fd9b3fe8
| 76,218
|
def load_labels(labels_path):
    """Loads labels of the Standard365 Places2 Dataset.

    Parameters
    ----------
    labels_path : str
        The location of the Standard365 Place2 labels file.

    Returns
    -------
    Two lists. The first holds class labels, the second the matching
    indoors/outdoors labels.
    """
    classes = []
    outdoors_indoors = []
    with open(labels_path) as labels_file:
        for line in labels_file:
            # Lines look like '/a/abbey 0': the class name follows a
            # 3-character category prefix, the trailing token is the
            # in/outdoor flag.  NOTE(review): assumes that fixed prefix
            # length — confirm against the dataset file format.
            space_at = line.find(' ')
            classes.append(line[3:space_at])
            outdoors_indoors.append(line[space_at:].strip())
    return classes, outdoors_indoors
|
e5db98f147c8a363dbea4fa59b66a46ef8d1c5b0
| 76,219
|
def sequence(find, numbers):
    """Return `find` if it occurs in the sequence `numbers`, else 'Nothing'.

    Bug fix: the original attached the `else` to the `if` inside the loop,
    so it returned 'Nothing' after inspecting only the first element — its
    own doctest ``sequence(4, (2, 4, 6))`` failed.  'Nothing' is now
    returned only after the whole sequence has been scanned.

    >>> sequence(1, [1, 2, 3, 4])
    1
    >>> sequence("i", "Hello world")
    'Nothing'
    >>> sequence(4, (2, 4, 6))
    4
    """
    for n in numbers:
        if find == n:
            return n
    return "Nothing"
|
e82540f2fcef534ecbaa5c2fff03d5de3f31bdf2
| 76,220
|
from typing import Tuple
def match_shape_lengths(s1: Tuple, s2: Tuple) -> Tuple:
    """
    Pad the shorter of two shape tuples with leading 1-axes so both have
    the larger axis count, and return the adjusted pair.

    This lets twit operate on two tensor shapes with the same number of
    axes each.  See twit_test.py.

    Raises AttributeError when the destination shape ``s2`` is empty.
    """
    if len(s2) == 0:
        raise AttributeError("Tensor destination shape can not be length 0, nowhere to put the results!")
    if len(s1) < len(s2):
        s1 = (1,) * (len(s2) - len(s1)) + s1
    elif len(s2) < len(s1):
        s2 = (1,) * (len(s1) - len(s2)) + s2
    return s1, s2
|
5f0ddfcbfb0d6523a795b3b81f3268c9501c5512
| 76,221
|
def get_cat_code_dict(df, col):
    """Map a categorical column's values to their integer category codes.

    Useful for re-encoding a categorical column as integer codes when
    working with shap functions.

    Parameters
    ----------
    df : pd.DataFrame
    col : str
        Column to transform.

    Returns
    -------
    dict
        Original category values as keys, integer codes as values.
    """
    codes = df[col].cat.codes.astype("category")
    return dict(zip(df[col], codes))
|
9b0cb51072133cf1896f1bd24313f9160c05fd37
| 76,222
|
def is_super(node):
    """return True if the node is referencing the "super" builtin function
    """
    # Both the node's own name and its root module must match; nodes
    # without a `name` attribute short-circuit to False.
    return (getattr(node, 'name', None) == 'super'
            and node.root().name == '__builtin__')
|
bea8392d69271c20c6b0648fdd20e67006c55016
| 76,227
|
def shu_osher_change_alpha_ij(alpha, beta, i, j, val):
    """
    **Input**:
        - alpha, beta: Shu-Osher arrays
        - i, j: indices
        - val: real number
    **Output**: Shu-Osher arrays alph, bet with alph[i,j] = alpha[i,j] + val
    (rows i of both arrays are compensated so the represented method is
    unchanged).  The arrays are modified in place and returned.
    """
    # Bump the (i, j) entry first; the row update below then uses the
    # already-modified row i, matching the original operation order.
    alpha[i, j] += val
    alpha[i, :] = alpha[i, :] - val * alpha[j, :]
    beta[i, :] = beta[i, :] - val * beta[j, :]
    return alpha, beta
|
f1ce978eb667dcc6c0a1e14c7a0675b1fe78a521
| 76,228
|
import typing
def no_parsing(f: typing.Callable):
    """Wrap a method under test so that it skips input parsing."""
    # Force `_parse=False` on every call so the wrapped callable skips
    # its input-parsing step.
    return lambda *args, **kwargs: f(*args, _parse=False, **kwargs)
|
ca2222ef87f25dda6beb2a9af12dbd5e2f76ea01
| 76,229
|
from typing import List
from typing import Dict
import pathlib
import json
def seed_game_entries() -> List[Dict]:
    """Read the adjacent `seed.json` file and return its entries as dicts."""
    seed_path = pathlib.Path(__file__).parent.absolute() / 'seed.json'
    with open(seed_path) as in_file:
        return json.load(in_file)
|
03f0d32642e5eb3924a0d33ab4b3a02f49a5a1a2
| 76,232
|
def return_duplicates(d_list):
    """Return the items of ``d_list`` that already appeared earlier.

    Every repeated occurrence after the first is reported, in order, so
    an item occurring three times shows up twice in the result.  Items
    must be hashable.

    :param d_list: the list
    :return: list of duplicates
    """
    seen = set()
    duplicates = []
    for item in d_list:
        if item in seen:
            duplicates.append(item)
        else:
            seen.add(item)
    return duplicates
|
700efb1b03266d5465d1ac2795d2a05cbb300243
| 76,234
|
import uuid
import json
def _create_uuid(filename):
"""Create a uuid for the device."""
with open(filename, "w") as fptr:
new_uuid = uuid.uuid4()
fptr.write(json.dumps({"nexia_uuid": str(new_uuid)}))
return new_uuid
|
26ab24f444377b9f2c63b569af1b3924099f1a84
| 76,236
|
def reverse_rec(tempstr):
    """ recursively reverses a string and returns it """
    # Base case: the reverse of the empty string is itself.
    if not tempstr:
        return ''
    # Reverse the tail, then append the head character.
    head, tail = tempstr[0], tempstr[1:]
    return reverse_rec(tail) + head
|
1e507abd9b466dcecf40b3d3216f0ddf33ca5645
| 76,237
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.